input
stringlengths
2.65k
237k
output
stringclasses
1 value
<reponame>CMPUT404F21TEAM/social-distribution import base64 from django.contrib import messages from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError from django.http.response import * from django.http import HttpResponse, JsonResponse from django.http.response import HttpResponseBadRequest from django.shortcuts import redirect, get_object_or_404 from django.views import View from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie from django.utils.decorators import method_decorator from datetime import datetime, timezone import json import logging import base64 from cmput404.constants import API_BASE import socialDistribution.requests as api_requests from socialDistribution.models import * from .decorators import validate_user, validate_node from .parsers import url_parser from .utility import getPaginated, makeInboxPost, makeLocalPost # References for entire file: # Django Software Foundation, "Introduction to class-based views", 2021-10-13 # https://docs.djangoproject.com/en/3.2/topics/class-based-views/intro/ # Django Software Foundation, "JsonResponse objects", 2021-10-13 # https://docs.djangoproject.com/en/3.2/ref/request-response/#jsonresponse-objects # Need to disable CSRF to make POST, PUT, etc requests. Otherwise, your request needs to contain 'X--CSRFToken: blahblah' with a CSRF token. # If we need CSRF validation in the future, just remove the csrf_exempt decorators. 
# # <NAME>, https://stackoverflow.com/users/6945548/martijn-ten-hoor, "How to disable Django's CSRF validation?", # 2016-10-12, https://stackoverflow.com/a/39993384, CC BY-SA 3.0 # # Note: @ensure_crsf_cookie will send the token in the response # <NAME>, https://stackoverflow.com/users/3904557/ryan-pergent, "how do I use ensure_csrf_cookie?", # 2017-05-30, https://stackoverflow.com/a/43712324, CC BY-SA 3.0 # Django Software Foundation, "Logging", https://docs.djangoproject.com/en/3.2/topics/logging/ logger = logging.getLogger("api") def index(request): response = { "message": "Welcome to the Social Distribution API for T04", "documentation": "https://github.com/CMPUT404F21TEAM/social-distribution/wiki/Web-Service-API-Documentation", "authors": f'{API_BASE}/authors/' } return JsonResponse(response) @method_decorator(csrf_exempt, name='dispatch') class AuthorsView(View): def get(self, request): """ GET - Retrieve all user profiles 'page' is indexed from 1, NOT 0. 'size' must be greater than 0 """ logger.info(f"GET /authors API endpoint invoked") authors = LocalAuthor.objects.order_by('created_date') page = request.GET.get("page") size = request.GET.get("size") if page and size: page = int(page) size = int(size) try: if page < 1 or size < 1: return HttpResponseBadRequest("Malformed query: page and size must be > 0") except Exception as e: logger.error(e, exc_info=True) return HttpResponseBadRequest(e) authors = getPaginated(authors, page, size) authors = [author.as_json() for author in authors] response = { "type": "authors", "items": authors } return JsonResponse(response) @method_decorator(csrf_exempt, name='dispatch') class AuthorView(View): def get(self, request, author_id): """ GET - Retrieve profile of {author_id} """ logger.info(f"GET /authors/{author_id} API endpoint invoked") author = get_object_or_404(LocalAuthor, pk=author_id) response = author.as_json() return JsonResponse(response) @method_decorator(validate_user) def post(self, request, author_id): """ 
POST - Update profile of {author_id} """ logger.info(f"POST /authors/{author_id} API endpoint invoked") author = get_object_or_404(LocalAuthor, id=author_id) djangoUser = author.user try: data = json.loads(request.body) # extract post data if data.get('displayName'): author.displayName = data.get('displayName') if data.get('github'): author.githubUrl = data.get('github') if data.get('email'): djangoUser.email = data.get('email') if data.get('profileImage'): author.profileImageUrl = data.get('profileImage') # update author author.save() # update django user djangoUser.save() except json.decoder.JSONDecodeError: return JsonResponse({ "error": "Invalid JSON" }, status=400) except Exception as e: logger.error(e, exc_info=True) return HttpResponseServerError() return JsonResponse(author.as_json()) @method_decorator(csrf_exempt, name='dispatch') class FollowersView(View): def get(self, request, author_id): """ GET - Get a list of authors who are the followers of {author_id} """ logger.info(f"GET /authors/{author_id}/followers API endpoint invoked") author = get_object_or_404(LocalAuthor, pk=author_id) followers = [follow.actor.as_json() for follow in author.follows.all()] response = { "type": "followers", "items": followers } return JsonResponse(response) @method_decorator(csrf_exempt, name='dispatch') class FollowersSingleView(View): def get(self, request, author_id, foreign_author_id): """ GET - Check if {foreign_author_id} is a follower of {author_id} """ logger.info(f"GET /author/{author_id}/followers/{foreign_author_id} API endpoint invoked") author = get_object_or_404(LocalAuthor, pk=author_id) try: # try to find and return follower author object follower = Author.objects.get(url=foreign_author_id) follow = author.follows.get(actor=follower) response = follow.actor.as_json() return JsonResponse(response) except (Author.DoesNotExist, Follow.DoesNotExist): # return 404 if author not found return HttpResponseNotFound() @method_decorator(validate_user) def put(self, 
request, author_id, foreign_author_id): """ PUT - Add {foreign_author_id} as a follower of {author_id} """ logger.info(f"PUT /author/{author_id}/followers/{foreign_author_id} API endpoint invoked") author = get_object_or_404(LocalAuthor, pk=author_id) follower, created = Author.objects.get_or_create(url=foreign_author_id) follow_obj = Follow.objects.create( object=author, actor=follower ) author.follows.add(follow_obj) # django doesn't duplicate relations response = follower.as_json() return JsonResponse(response) @method_decorator(validate_node) def delete(self, request, author_id, foreign_author_id): """ DELETE - Remove {foreign_author_id} as a follower of {author_id} """ logger.info(f"DELETE /author/{author_id}/followers/{foreign_author_id} API endpoint invoked") author = get_object_or_404(LocalAuthor, pk=author_id) try: # try to find and delete follower author object follower = Author.objects.get(url=foreign_author_id) follow = author.follows.get(actor=follower) follow.delete() return HttpResponse(status=204) # no content except (Author.DoesNotExist, Follow.DoesNotExist): # return 404 if author not found return HttpResponseNotFound() @method_decorator(csrf_exempt, name='dispatch') class LikedView(View): def get(self, request, author_id): """ GET - Get a list of like objects from {author_id} """ logger.info(f"GET /author/{author_id}/liked API endpoint invoked") author = get_object_or_404(LocalAuthor, id=author_id) try: author_liked_posts = LocalPost.objects.filter( likes__author=author, visibility=LocalPost.Visibility.PUBLIC ) author_liked_comments = Comment.objects.filter(likes__author=author) likes = [] for post in author_liked_posts: like = { "@context": "https://www.w3.org/ns/activitystreams", "summary": f"{author.displayName} Likes your post", "type": "Like", "author": author.as_json(), "object": f"{API_BASE}/author/{post.author.id}/posts/{post.id}" } likes.append(like) for comment in author_liked_comments: like = { "@context": 
"https://www.w3.org/ns/activitystreams", "summary": f"{author.displayName} Likes your comment", "type": "Like", "author": author.as_json(), "object": f"{API_BASE}/author/{comment.post.author.id}/posts/{comment.post.id}/comments/{comment.id}" } likes.append(like) response = { "type": "liked", "items": likes } except LocalAuthor.DoesNotExist: return HttpResponseNotFound() except Exception as e: logger.error(e, exc_info=True) return HttpResponseServerError() return JsonResponse(response) @method_decorator(csrf_exempt, name='dispatch') class PostsView(View): def get(self, request, author_id): logger.info(f"GET /author/{author_id}/posts API endpoint invoked") author = get_object_or_404(LocalAuthor, id=author_id) # Send all PUBLIC posts try: page = request.GET.get("page") size = request.GET.get("size") posts = LocalPost.objects.listed().get_public().filter(author=author).order_by('pk') if page and size: page = int(page) size = int(size) try: if page < 1 or size < 1: return HttpResponseBadRequest({ "error": "Malformed query: page and size must be > 0" }) except Exception as e: return HttpResponseBadRequest() posts = getPaginated(posts, page, size) posts = [post.as_json() for post in posts] response = { "type": "posts", "page": page, "size": size, "items": posts } except Exception as e: logger.error(e, exc_info=True) return HttpResponseServerError() return JsonResponse(response) @method_decorator(validate_user) def post(self, request, author_id): ''' POST - creates a LocalPost for the given {author_id} with the given data ''' try: data = json.loads(request.body) post = makeLocalPost(data, author_id) except json.decoder.JSONDecodeError as e: return JsonResponse({ "error": "Invalid JSON: " + e.msg }, status=400) except Exception as e: logger.error(e, exc_info=True) return JsonResponse({ "error": "An unknown error occurred" }, status=500) return JsonResponse(status=201, data=post.as_json()) @method_decorator(csrf_exempt, name='dispatch') class PostView(View): def get(self, 
request, author_id, post_id): """ GET - Get json for post {post_id} """ logger.info(f"GET /author/{author_id}/posts/{post_id} API endpoint invoked") try: post = LocalPost.objects.get(id=post_id) response = post.as_json() except LocalPost.DoesNotExist: return HttpResponseNotFound() except Exception as e: logger.error(e, exc_info=True) return HttpResponseServerError() return JsonResponse(response) @method_decorator(validate_user) def delete(self, request, author_id, post_id): """ DELETE - Delete post {post_id} """ logger.info(f"DELETE /author/{author_id}/posts/{post_id} API endpoint invoked") try: post = LocalPost.objects.get(id=post_id) post.delete() except LocalPost.DoesNotExist: return HttpResponseNotFound() except Exception as e: logger.error(e, exc_info=True) return HttpResponseServerError() return HttpResponse(200) @method_decorator(validate_user) def post(self, request, author_id, post_id): """ POST - Update post {post_id} """ logger.info(f"POST /author/{author_id}/posts/{post_id} API endpoint invoked") post = get_object_or_404(LocalPost, id=post_id) data = json.loads(request.body) try: post.title = data['title'] post.description = data['description'] post.content_type = data['contentType'] post.content = data['content'].encode('utf-8') post.visibility = data['visibility'] post.unlisted = data['unlisted'] categories = data['categories'] if categories is not None: categories_to_remove = [cat.category for cat in post.categories.all()] """ This implementation makes category names case-insensitive. This makes handling Category objects cleaner, albeit slightly more involved. 
""" for category in categories: category_obj, created = Category.objects.get_or_create( category__iexact=category, defaults={'category': category} ) post.categories.add(category_obj) while category_obj.category in categories_to_remove: categories_to_remove.remove(category_obj.category) # don't remove this category for category in categories_to_remove: category_obj = Category.objects.get(category=category) post.categories.remove(category_obj) post.save() return JsonResponse(status=201, data=post.as_json()) except ValidationError: return HttpResponseBadRequest() except Exception as e: logger.error(e, exc_info=True) return HttpResponseServerError() @method_decorator(validate_user) def put(self, request, author_id, post_id): ''' PUT - creates a LocalPost for the given {author_id} with the given data with the given {post_id} ''' get_object_or_404(LocalAuthor, id=author_id) try: data = json.loads(request.body) post = makeLocalPost(data, author_id, post_id) except json.decoder.JSONDecodeError: return JsonResponse({ "error": "Invalid JSON" }, status=400) except ValidationError: return JsonResponse({ "error": "Not a valid UUID" }, status=400) except Exception as e: logger.error(e, exc_info=True) return JsonResponse({ "error": "An unknown error occurred" }, status=500) return JsonResponse(status=201, data=post.as_json()) @method_decorator(csrf_exempt, name='dispatch') class PostLikesView(View): def get(self, request, author_id, post_id): """ GET - Get a list of authors who like {post_id} """ logger.info(f"GET /author/{author_id}/posts/{post_id}/likes API endpoint invoked") author = get_object_or_404(Author, id=author_id) post = get_object_or_404(LocalPost, id=post_id, author=author) try: post_likes = post.likes.all() items = [] for like in post_likes: like_author_json = like.author.as_json() like = { "@context": "https://www.w3.org/ns/activitystreams", "summary": f"{like_author_json['displayName']} Likes your post", "type": "Like", "author": like_author_json, "object": 
f"{API_BASE}/author/{post.author.id}/posts/{post.id}" } items.append(like) response = { "type": "likes", "items": items } except Exception as e: logger.error(e, exc_info=True) return HttpResponseServerError() return JsonResponse(response) @method_decorator(csrf_exempt, name='dispatch') class PostCommentsView(View): ''' HANDLE Comment GET and POST ''' def get(self, request, author_id, post_id): logger.info(f"GET /author/{author_id}/posts/{post_id}/comments API endpoint invoked") post = get_object_or_404(LocalPost, id=post_id) author = get_object_or_404(LocalAuthor, id=author_id) # Send all comments try: page = request.GET.get("page") size = request.GET.get("size") # Check if the post author match with author in url if
easily collected into the tree of tests, and have their public #test methods called. It is worth noting that, for a given TestCase_, an #instance of the test fixture class will be constructed prior to the #test method being called, and destructed afterwards. This prevents 'leakage' #of information from one test case to the next. # template< typename FixtureT > class TestCase_ (TestCase) : typedef void (FixtureT.*TestMethodPtr)( Context ) # Constructor adds the TestMethod pointer TestCase_( str sName, TestMethodPtr pTestMethod ) : TestCase( sName ), _pTestMethod( pTestMethod ) # Create a TestFixture instance and invoke def run(ctx) if (TestMethod) else ( FixtureT().*_pTestMethod )( ctx ) virtual ~TestCase_() _pTestMethod = TestMethodPtr() #* #A TestSuite is the \em composite component of the Composite pattern, #and allows aggregation of Tests into hierarchies. # class TestSuite (Test) : TestSuite( str name ) #* Adds a Test to the suite. add = void( Test* pTest ) #* # @returns The immediate child denoted by name, or 0 if not found. # findChild = Test*( str name) accept = virtual bool( Test.Visitor v ) virtual ~TestSuite() typedef std.vector< Test > Tests _tests = Tests() # Collection of Suites and/or Cases #* #TestGraph is a singleton providing central access to the tree of tests #primarily, it provides access to the root suite. # class TestGraph : static TestGraph instance() #* # @return a pointer to the root TestSuite. # root = TestSuite*() #* # A utility function for accessing an arbitrary suite by pathname, relative to # the suite 'tsuite' (defaults to root if null), and with the option of creating # the \em TestSuite designated by \em path, if it does not already exist. # # This method may return 0 if the suite either cannot be found (and createIfNecssary # is 0), or the first component of \em path is not the same as the name of the # TestSuite \em tsuite. 
# # This was written to aid the auto-registration of tests at specific points in # the test tree, where the tests' AutoRegistrationAgents may be distributed across # several files, and cannot be guaranteed to run in a given order. E.g. You cannot # register a test "root.osg.MyTest" unless you know that the the suite "root.osg" # already exists. # # # @param path The name of the TestSuite to return. # @param tsuite The suite to 'start from'. Path is relative to this # suite (defaults to root suite). # @param createIfNecessary Optionally create the TestSuite(s) denoted by path if # they do not exist. # suite = TestSuite*( str path, TestSuite* tsuite = 0,bool createIfNecessary = False) #* # Does the same job as the version of suite listed above, but the path # is passed in as components in a list, represented by the iterator parameters. # suite = TestSuite*( std.list<str>.iterator it, std.list<str>.iterator end, TestSuite* tsuite, bool createIfNecessary) TestGraph() TestGraph( TestGraph) operator = ( TestGraph) root_ = TestSuite() #* #Maintains a string that when accessed in the "visit" member, returns the #current qualified TestSuite path. # class TestQualifier (TestVisitor) : enum SEPCHAR = ord(".") # Entering a composite: Push its name on the Path visitEnter = virtual bool( TestSuite* pSuite ) # Leaving a composite: Pop its name from the Path visitLeave = virtual bool( TestSuite* pSuite ) # Provide read-only access to the current qualifier str currentPath() _path = str() # Current qualifier #* #QualifiedTestPrinter prints to standard output a list of fully #qualified tests. # class QualifiedTestPrinter (TestQualifier) : visit = virtual bool( TestCase* pTest ) #* #A TestRecord records the output of a given test case, i.e. its start/stop time, #its result, and a textual description of any problems. # #\todo Consider adding accessor methods if necessary, to get the details # stored in the TestRecord. 
# class TestRecord : TestRecord() TestRecord( TestRecord rhs): name_(rhs.name_), start_(rhs.start_), stop_(rhs.stop_), result_(rhs.result_), problem_(rhs.problem_) operator = ( TestRecord rhs) if rhs==this : return *this name_ = rhs.name_ start_ = rhs.start_ stop_ = rhs.stop_ result_ = rhs.result_ problem_ = rhs.problem_ return *this start = void() stop = void() log = void( TestFailureX e) log = void( TestErrorX e) log = void( std.exception e) log = void( str s) # FIXME: Add accessors? # Onlye a TestReport can create a TestRecord friend class TestReport TestRecord( str name) enum Result Success,Failure,Error friend std.ostream operator, (std.ostream o, TestRecord tr) static osg.Timer timer_ # To time tests name_ = str() start_ = osg.Timer_t() stop_ = osg.Timer_t() result_ = Result() problem_ = str() #* #A TestReport represents the complete set of results (TestRecords) for a #given test run. # #\todo Add support for printing the test report in various formats: # e.g. text, XML, CSV # class TestReport : def createRecord(s): _records.push_back(TestRecord(s)) return _records.back() _records = std.list<TestRecord>() #* #A TestRunner is a visitor which will run specified tests as it traverses the #test graph. # #\todo Consider an accessor method to get at the TestReport if necessary. # class TestRunner (TestQualifier) : TestRunner( TestContext ctx ) #* # Tests may be specified by partial names. E.g. specifiying "root" # will run all tests below root, i.e. all tests. # Specifiying "root.osg" will run all tests below \em root.osg. # Specifying "root.osg.de" will run all tests (and suites) below # \em root.osg with names beginning with the \em de. 
# specify = void( str sQualifiedName ) visitEnter = bool( TestSuite* pSuite ) visit = bool( TestCase* pTest ) visitLeave = bool( TestSuite* pSuite ) perform = void( TestCase* pTest ) operator = ( TestRunner) return *this _db = TestReport() # Results _ctx = TestContext() # The Global Testing Context _tests = std.vector<str>() # Specified Tests #* #Starts a TestSuite singleton function #@see OSGUTX_ADD_TESTCASE, OSGUTX_END_TESTSUITE # #define OSGUTX_BEGIN_TESTSUITE( tsuite ) \ osgUtx.TestSuite* tsuite##_TestSuite() \ \ static osgUtx.TestSuite s_suite = 0 \ if s_suite == 0 : \ s_suite = osgUtx.TestSuite( #tsuite ) #* #Adds a test case to a suite object being created in a TestSuite singleton function. #@see OSGUTX_BEGIN_TESTSUITE, OSGUTX_END_TESTSUITE # #define OSGUTX_ADD_TESTCASE( tfixture, tmethod ) \ s_suite.add( osgUtx.TestCase_<tfixture>( \ #tmethod, tfixture.tmethod ) ) #* #Ends a TestSuite singleton function #@see OSGUTX_BEGIN_TESTSUITE, OSGUTX_ADD_TESTCASE # #define OSGUTX_END_TESTSUITE \ \ return s_suite \ #* Define a TestSuite accessor #define OSGUTX_TESTSUITE( tsuite ) \ tsuite##_TestSuite() #* #Adds a suite to a suite - allows composition of test suites. #@see OSGUTX_BEGIN_TESTSUITE, OSGUTX_END_TESTSUITE # #define OSGUTX_ADD_TESTSUITE( childSuite ) \ s_suite.add( childSuite##_TestSuite() ) #* Autoregister a testsuite with the root suite at startup #define OSGUTX_AUTOREGISTER_TESTSUITE( tsuite ) \ static osgUtx.TestSuiteAutoRegistrationAgent tsuite##_autoRegistrationObj__( tsuite##_TestSuite() ) #* Auto register a testsuite with at designated point in the suite graph at startup #define OSGUTX_AUTOREGISTER_TESTSUITE_AT( tsuite , path ) \ static osgUtx.TestSuiteAutoRegistrationAgent tsuite##_autoRegistrationObj__( tsuite##_TestSuite(), #path ) namespace osgUtx #* #A helper struct to perform automatic registration at program startup not for #direct use, it should be used via the following macros. 
(It's a secret agent :-) # #@see OSGUTX_AUTOREGISTER_TESTSUITE, OSGUTX_AUTOREGISTER_TESTSUITE_AT # class TestSuiteAutoRegistrationAgent : TestSuiteAutoRegistrationAgent(TestSuite* tsuite, char* path = 0) if not path : path = "root" # Find the suite named in 'path', create it if necessary regSuite = osgUtx.TestGraph.instance().suite( path, 0, True ) if not regSuite : osg.notify(osg.WARN), "Warning, unable to register test suite named \"", tsuite.name(), "\" at ", path, ", falling back to root suite." regSuite = osgUtx.TestGraph.instance().root() regSuite.add(tsuite) #* #OSGUTX_TEST_F is a convenience macro, analogous to assert(), which will #throw an osgUtx.TestFailureX if \em expr evaluates to False this should be #used to test for failure in a given test, as opposed to an actual error #in the test owing to some other reason than the tested code being faulty. # #The exception will indicate the file and line number of the failed expression, #along with expression itself. # #define OSGUTX_TEST_F( expr ) \ if not (expr) : \ ss = strstream() \ ss, #expr, " failure: ", __FILE__, ", line ", __LINE__, std.ends \ throw osgUtx.TestFailureX(ss.str()) \ #* #OSGUTX_TEST_E is a convenience macro, analogous to assert(), which will #throw an osgUtx.TestErrorX if \em expr evaluates to False this should be #used to test for an error in a given test, as opposed to a failure #in the tested code. # #The exception will indicate the file and line number of the failed expression, #along with expression itself. # #define OSGUTX_TEST_E( expr ) \ if not (expr) : \ ss = strstream() \ ss, #expr, " error: ", __FILE__, ", line ", __LINE__, std.ends \ throw osgUtx.TestErrorX(ss.str()) \ #endif # OSG_UNITTESTFRAMEWORK # Translated from
<reponame>osheraz/komodo<filename>komodo2_rl/src/environments/komodo_env_new.py #! /usr/bin/env python # coding=utf-8 import rospy import time import numpy as np from std_srvs.srv import Empty from nav_msgs.msg import Odometry from std_msgs.msg import Float64 from sensor_msgs.msg import JointState from gazebo_msgs.srv import SetModelState, SetModelStateRequest, SetModelConfiguration, SetModelConfigurationRequest from gazebo_msgs.srv import GetModelState, GetModelStateRequest, GetLinkState, GetLinkStateRequest from gazebo_msgs.msg import ModelState from gazebo_msgs.srv import SpawnModel, SpawnModelRequest, SpawnModelResponse from tf.transformations import quaternion_from_euler, euler_from_quaternion from sensor_msgs.msg import Imu from geometry_msgs.msg import Twist, Vector3Stamped from Spawner import Spawner from matplotlib import path import math import pandas from random import randint class Actions: def __init__(self): self.arm_pos_pub = rospy.Publisher('/arm_position_controller/command', Float64, queue_size=10) self.bucket_pos_pub = rospy.Publisher('/bucket_position_controller/command', Float64, queue_size=10) self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10) self.vel_msg = Twist() # assumption we are moving just in x-axis self.vel_msg.linear.y = 0 self.vel_msg.linear.z = 0 self.vel_msg.angular.x = 0 self.vel_msg.angular.y = 0 self.vel_msg.angular.z = 0 def move(self, cmd): # cmd [velocity , arm , bucket ] self.vel_msg.linear.x = cmd[0] arm_cmd = cmd[1] bucket_cmd = cmd[2] self.arm_pos_pub.publish(arm_cmd) self.bucket_pos_pub.publish(bucket_cmd) self.vel_pub.publish(self.vel_msg) def reset_move(self, cmd): self.vel_msg.linear.x = cmd[0] self.arm_pos_pub.publish(cmd[1]) self.bucket_pos_pub.publish(cmd[2]) self.vel_pub.publish(self.vel_msg) class Pile: def __init__(self): self.length = 1 self.width = 1 self.height = 1 self.size = 0.1 self.radius = 0.035 self.num_particle = 0 self.z_max = 0.26 self.x_min = 0 self.sand_box_x = 0.35 self.sand_box_y = 
0.301 self.sand_box_z = 0.0 self.sand_box_height = 0.25 self.cur_height = 0 self.cur_angle = 0 self.center_x = self.sand_box_x / 2 self.center_z = self.sand_box_z / 2 self.HALF_KOMODO = 0.53 / 2 self.spawner = Spawner() self.spawn_srv = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel) self.model_state_proxy = rospy.ServiceProxy('/gazebo/set_model_state',SetModelState) self.get_model_state_proxy = rospy.ServiceProxy('/gazebo/get_model_state',GetModelState) # Spawn Box box_req = self.spawner.create_box_request('sand_box', self.sand_box_x, self.sand_box_y, self.sand_box_z,0.0, 0.0, 0.0) self.spawn_srv(box_req) self.pile_box_req = SetModelStateRequest() self.pile_box_req.model_state = ModelState() self.pile_box_req.model_state.model_name = 'sand_box' self.pile_box_req.model_state.pose.position.x = self.sand_box_x self.pile_box_req.model_state.pose.position.y = self.sand_box_y self.pile_box_req.model_state.pose.position.z = self.sand_box_z self.pile_box_req.model_state.pose.orientation.x = 0.0 self.pile_box_req.model_state.pose.orientation.y = 0.0 self.pile_box_req.model_state.pose.orientation.z = 0.0 self.pile_box_req.model_state.pose.orientation.w = 0.0 self.pile_box_req.model_state.twist.linear.x = 0.0 self.pile_box_req.model_state.twist.linear.y = 0.0 self.pile_box_req.model_state.twist.linear.z = 0.0 self.pile_box_req.model_state.twist.angular.x = 0.0 self.pile_box_req.model_state.twist.angular.y = 0.0 self.pile_box_req.model_state.twist.angular.z = 0.0 self.pile_box_req.model_state.reference_frame = 'world' def create_pile(self): count = 0 l = int(self.length/self.size) w = int(self.width/self.size) h = int(self.height/self.size) for k in range(h): #w = w - 1 l = l - 1 for j in range(-w/2 , w/2): for i in range(0,l): count +=1 name = "particle" + str(count) pos = [(2*i+1)*self.radius , (2*j+1)*self.radius, self.radius*(1+2*k) ] rot = [0.0, 0.0, 0.0] # req = self.spawner.create_cube_request(name, pos[0], pos[1], pos[2], # rot[0], rot[1], rot[2], # 
self.size, self.size, self.size) req = self.spawner.create_sphere_request(name, pos[0], pos[1], pos[2], rot[0], rot[1], rot[2], self.radius) self.spawn_srv(req) self.num_particle = count def set_pile(self,flag): count = 0 l = int(self.length/self.size) w = int(self.width/self.size) h = int(self.height/self.size) self.model_state_proxy(self.pile_box_req) eps = 0.001 for k in range(h): l = l - 1 for j in range(-w/2, w/2): for i in range(0,l): count +=1 if flag == 1 and k == h-1 : self.pile_state_req = SetModelStateRequest() self.pile_state_req.model_state = ModelState() self.pile_state_req.model_state.model_name = 'particle' + str(count) self.pile_state_req.model_state.pose.position.x = (self.radius + eps) * (1 + 2 * i)-1 self.pile_state_req.model_state.pose.position.y = (self.radius + eps) * (1 + 2 * j) self.pile_state_req.model_state.pose.position.z = self.radius * (1 ) self.pile_state_req.model_state.pose.orientation.x = 0.0 self.pile_state_req.model_state.pose.orientation.y = 0.0 self.pile_state_req.model_state.pose.orientation.z = 0.0 self.pile_state_req.model_state.pose.orientation.w = 0.0 self.pile_state_req.model_state.twist.linear.x = 0.0 self.pile_state_req.model_state.twist.linear.y = 0.0 self.pile_state_req.model_state.twist.linear.z = 0.0 self.pile_state_req.model_state.twist.angular.x = 0.0 self.pile_state_req.model_state.twist.angular.y = 0.0 self.pile_state_req.model_state.twist.angular.z = 0.0 self.pile_state_req.model_state.reference_frame = 'world' self.model_state_proxy(self.pile_state_req) continue if flag == 2 and k == 0 and i == l-1: self.pile_state_req = SetModelStateRequest() self.pile_state_req.model_state = ModelState() self.pile_state_req.model_state.model_name = 'particle' + str(count) self.pile_state_req.model_state.pose.position.x = (self.radius + eps) * (1 + 2 * i)-1 self.pile_state_req.model_state.pose.position.y = (self.radius + eps) * (1 + 2 * j) self.pile_state_req.model_state.pose.position.z = self.radius * (1 ) 
self.pile_state_req.model_state.pose.orientation.x = 0.0 self.pile_state_req.model_state.pose.orientation.y = 0.0 self.pile_state_req.model_state.pose.orientation.z = 0.0 self.pile_state_req.model_state.pose.orientation.w = 0.0 self.pile_state_req.model_state.twist.linear.x = 0.0 self.pile_state_req.model_state.twist.linear.y = 0.0 self.pile_state_req.model_state.twist.linear.z = 0.0 self.pile_state_req.model_state.twist.angular.x = 0.0 self.pile_state_req.model_state.twist.angular.y = 0.0 self.pile_state_req.model_state.twist.angular.z = 0.0 self.pile_state_req.model_state.reference_frame = 'world' self.model_state_proxy(self.pile_state_req) continue self.pile_state_req = SetModelStateRequest() self.pile_state_req.model_state = ModelState() self.pile_state_req.model_state.model_name = 'particle'+str(count) self.pile_state_req.model_state.pose.position.x = (2*i+1)*(self.radius+eps) self.pile_state_req.model_state.pose.position.y = (self.radius+ eps)*(1+2*j) self.pile_state_req.model_state.pose.position.z = self.radius*(1+2*k) self.pile_state_req.model_state.pose.orientation.x = 0.0 self.pile_state_req.model_state.pose.orientation.y = 0.0 self.pile_state_req.model_state.pose.orientation.z = 0.0 self.pile_state_req.model_state.pose.orientation.w = 0.0 self.pile_state_req.model_state.twist.linear.x = 0.0 self.pile_state_req.model_state.twist.linear.y = 0.0 self.pile_state_req.model_state.twist.linear.z = 0.0 self.pile_state_req.model_state.twist.angular.x = 0.0 self.pile_state_req.model_state.twist.angular.y = 0.0 self.pile_state_req.model_state.twist.angular.z = 0.0 self.pile_state_req.model_state.reference_frame = 'world' self.model_state_proxy(self.pile_state_req) def particle_location(self,num_p): px_arr = np.zeros(num_p) py_arr = np.zeros(num_p) pz_arr = np.zeros(num_p) for i in range(1, num_p+1): get_particle_state_req = GetModelStateRequest() get_particle_state_req.model_name = 'particle'+str(i) get_particle_state_req.relative_entity_name = 'base_footprint' # 
'world' particle_state = self.get_model_state_proxy(get_particle_state_req) x = abs(particle_state.pose.position.x) + self.HALF_KOMODO y = particle_state.pose.position.y z = particle_state.pose.position.z orientation = particle_state.pose.orientation (roll, pitch, theta) = euler_from_quaternion( [orientation.x, orientation.y, orientation.z, orientation.w]) px_arr[i-1] = x py_arr[i-1] = y pz_arr[i-1] = z return px_arr, pz_arr, py_arr def in_bucket_2d(self,xq, yq, xv, yv): shape = xq.shape xq = xq.reshape(-1) yq = yq.reshape(-1) xv = xv.reshape(-1) yv = yv.reshape(-1) q = [(xq[i], yq[i]) for i in range(xq.shape[0])] p = path.Path([(xv[i], yv[i]) for i in range(xv.shape[0])]) return p.contains_points(q).reshape(shape) class KomodoEnvironment: def __init__(self): rospy.init_node('RL_Node') # TODO: Pile information self.pile = Pile() # (1.75, 2.8, 1.05, 0.34) self.pile.length = 1.75 self.pile.width = 2.8 self.pile.height = 1.05 self.pile.size = 0.34 self.pile_flag = True # TODO: Robot information self.bucket_init_pos = 0 self.arm_init_pos = 0 self.vel_init = 0 self.HALF_KOMODO = 0.53 / 2 self.particle = 0 self.x_tip = 0 self.z_tip = 0 self.bucket_link_x = 0 self.bucket_link_z = 0 self.velocity = 0 self.wheel_vel = 0 self.joint_name_lst = ['arm_joint', 'bucket_joint', 'front_left_wheel_joint', 'front_right_wheel_joint', 'rear_left_wheel_joint', 'rear_right_wheel_joint'] self.last_pos = np.zeros(3) self.last_ori = np.zeros(4) self.max_limit = np.array([0.1, 0.32, 0.9]) # np.array([0.1, 0.32, 0.548]) self.min_limit = np.array([-0.1, -0.1, -0.5]) # np.array([-0.1, -0.2, -0.5]) self.orientation = np.zeros(4) self.angular_vel = np.zeros(3) self.linear_acc = np.zeros(3) # TODO: RL information self.nb_actions = 3 # base , arm , bucket self.state_shape = (self.nb_actions * 2 + 6,) self.action_shape = (self.nb_actions,) self.actions = Actions() self.starting_pos = np.array([self.vel_init,self.arm_init_pos, self.bucket_init_pos]) self.action_range = self.max_limit - self.min_limit 
self.action_mid = (self.max_limit + self.min_limit) / 2.0 self.last_action = np.zeros(self.nb_actions) self.joint_state = np.zeros(self.nb_actions) self.joint_pos = self.starting_pos self.state = np.zeros(self.state_shape) self.reward = 0.0 self.done = False self.episode_start_time = 0.0 self.max_sim_time = 8.0 # TODO: Robot information Subscribers self.joint_state_subscriber = rospy.Subscriber('/joint_states',JointState,self.joint_state_subscriber_callback) self.velocity_subscriber = rospy.Subscriber('/mobile_base_controller/odom',Odometry,self.velocity_subscriber_callback) self.imu_subscriber = rospy.Subscriber('/IMU',Imu,self.imu_subscriber_callback) # TODO: Gazebo stuff self.pause_proxy = rospy.ServiceProxy('/gazebo/pause_physics',Empty) self.unpause_proxy = rospy.ServiceProxy('/gazebo/unpause_physics',Empty) self.reset_world = rospy.ServiceProxy('/gazebo/reset_world',Empty) self.model_config_proxy = rospy.ServiceProxy('/gazebo/set_model_configuration',SetModelConfiguration) self.model_config_req = SetModelConfigurationRequest() self.model_config_req.model_name = 'komodo2' self.model_config_req.urdf_param_name = 'robot_description' self.model_config_req.joint_names = self.joint_name_lst self.model_config_req.joint_positions = self.starting_pos self.model_state_proxy = rospy.ServiceProxy('/gazebo/set_model_state',SetModelState) self.model_state_req = SetModelStateRequest() self.model_state_req.model_state = ModelState() self.model_state_req.model_state.model_name = 'komodo2' self.model_state_req.model_state.pose.position.x = 1.0 self.model_state_req.model_state.pose.position.y = 0.0 self.model_state_req.model_state.pose.position.z = 0.0 self.model_state_req.model_state.pose.orientation.x = 0.0 self.model_state_req.model_state.pose.orientation.y = 0.0 self.model_state_req.model_state.pose.orientation.z = 0.0 self.model_state_req.model_state.pose.orientation.w = 0.0 self.model_state_req.model_state.twist.linear.x = 0.0 
def normalize_joint_state(self, joint_pos):
    """Scale raw joint positions into the range the policy network expects.

    The fixed gain of 3.0 matches the scaling used during training.
    Works element-wise on scalars or numpy arrays.
    """
    JOINT_SCALE = 3.0  # 3.0
    return JOINT_SCALE * joint_pos
model's joint config rospy.wait_for_service('/gazebo/set_model_configuration') try: self.model_config_proxy(self.model_config_req) except rospy.ServiceException as e: print('/gazebo/set_model_configuration call failed') self.joint_pos = self.starting_pos self.actions.reset_move(self.starting_pos) #spawner rospy.wait_for_service('/gazebo/spawn_sdf_model') try: if self.pile_flag: self.pile.create_pile() self.pile_flag = False self.pile.cur_height = 6 * self.pile.radius self.pile.cur_angle = np.pi / 4 else: x = randint(1, 3) self.pile.set_pile(x) if x == 1: self.pile.cur_height = 4 * self.pile.radius self.pile.cur_angle = np.pi / 4 if x == 2: self.pile.cur_height = 6 * self.pile.radius self.pile.cur_angle = np.pi * 0.312 if x == 3: self.pile.cur_height = 6 * self.pile.radius self.pile.cur_angle = np.pi / 4 except rospy.ServiceException as e: print('/gazebo/unpause_physics service call failed') #unpause physics rospy.wait_for_service('/gazebo/unpause_physics') try: self.unpause_proxy() except rospy.ServiceException as e: print('/gazebo/unpause_physics service call failed') rospy.sleep(0.5) self.reward = 0.0 # Init reward self.state = np.zeros(self.state_shape) rospy.wait_for_service('/gazebo/get_model_state') model_state = self.get_model_state_proxy(self.get_model_state_req) pos = np.array([model_state.pose.position.x, model_state.pose.position.y, model_state.pose.position.z]) done = False rospy.wait_for_service('/gazebo/get_link_state') self.last_joint = self.joint_state self.last_pos = pos diff_joint = np.zeros(self.nb_actions) normed_js = self.normalize_joint_state(self.joint_state) pile_data = np.array([self.pile.cur_height, self.pile.cur_angle]) arm_data = np.array([self.particle, self.x_tip, self.z_tip, self.bucket_link_x, self.bucket_link_z]) model_data = np.array([pos[0]]) self.state = np.concatenate((arm_data, model_data, normed_js, diff_joint)).reshape(1, -1) self.episode_start_time = rospy.get_time() self.last_action = np.zeros(self.nb_actions) return 
self.state, done def check_particle_in_bucket(self):
def make_key(pathname):
    """Build a lookup key from the last four components of *pathname*.

    The key has the form 'date~slice~cell~protocol' (the trailing four
    path components joined with '~'). Unlike the original index-based
    loop over range(-4, 0), slicing with parts[-4:] does not raise
    IndexError for paths shorter than four components — it simply uses
    whatever components exist.

    Parameters
    ----------
    pathname : pathlib.Path
        Path whose trailing components identify date/slice/cell/protocol.

    Returns
    -------
    str
    """
    return '~'.join(pathname.parts[-4:])
PSC full fit (1-exp(tau_rise))^4 * exp(tau_fall) """ def __init__(self): pass # nothing to do def _fcn_tau(self, params, x, data): """Model single exponential""" v = params.valuesdict() model = v['amp'] * np.exp(-x/v['tau_fall']) + v['DC'] return model - data def fitTau(self): # create a set of Parameters params = lmfit.Parameters() params.add('amp', value=self.ptreedata.param('Initial Fit Parameters').param('amp').value(), min=-self.dmax, max=self.dmax) params.add('tau_fall', value=self.ptreedata.param('Initial Fit Parameters').param('taufall').value(), min=1e-4, max=1e-1) params.add('DC', value=self.ptreedata.param('Initial Fit Parameters').param('DC').value(), min=-1e3, max=1e3) t0 = self.T0.value() t1 = self.T1.value() it0 = int(t0/self.dt) it1 = int(t1/self.dt) if it0 > it1: t = it0 it0 = it1 it1 = t time_zero = int(self.time_zero/self.dt) print('timezero: ', time_zero, self.dataX[time_zero]) # do fit, here with the default leastsq algorithm minner = lmfit.Minimizer(self._fcn_tau, params, fcn_args=(self.dataX[it0:it1]-self.dataX[time_zero], self.dataY[it0:it1])) self.fitresult = minner.minimize('leastsq') # calculate final result final = self.dataY[it0:it1] + self.fitresult.residual # write error report lmfit.report_fit(self.fitresult) def _fcn_EPSC(self, params, x, data): """Model EPSC""" v = params.valuesdict() model = v['amp'] * (((1. 
def fitEPSC(self):
    """Fit the full EPSC model ((1-exp(-t/tau_rise))^4 * exp(-t/tau_fall))
    to the cursor-selected data window using lmfit.

    amp, tau_rise and tau_fall are seeded from the parameter tree; DC is
    seeded from the mean of the first 10 samples. The time base is
    shifted so t=0 corresponds to self.time_zero. The lmfit result is
    stored in self.fitresult and a fit report is printed.
    """
    # create a set of Parameters
    params = lmfit.Parameters()
    params.add('amp',
               value=self.ptreedata.param('Initial Fit Parameters').param('amp').value(),
               min=-self.dmax, max=self.dmax)
    params.add('tau_rise',
               value=self.ptreedata.param('Initial Fit Parameters').param('taurise').value(),
               min=1e-4, max=1e-1)
    params.add('tau_fall',
               value=self.ptreedata.param('Initial Fit Parameters').param('taufall').value(),
               min=1e-4, max=1e-1)
    # Seed DC from the baseline (mean of the first 10 samples).
    # BUGFIX: 'DC' was previously added twice; the second add used
    # bounds (dc - dc, dc + dc) which are invalid (min >= max) whenever
    # dc <= 0 and silently discarded the first add's bounds. Add it once
    # with wide, always-valid bounds.
    dc = np.mean(self.dataY[0:10])
    params.add('DC', value=dc, min=-1e3, max=1e3)
    # Fit window from the cursors; allow them to be set in either order.
    t0 = self.T0.value()
    t1 = self.T1.value()
    it0 = int(t0 / self.dt)
    it1 = int(t1 / self.dt)
    if it0 > it1:
        it0, it1 = it1, it0
    # do fit, here with the default leastsq algorithm
    time_zero = int(self.time_zero / self.dt)
    print('timezero: ', time_zero, self.dataX[time_zero])
    print(self.dataX[it0:it1]-self.time_zero)
    print(self.dataY[it0:it1])
    minner = lmfit.Minimizer(self._fcn_EPSC, params,
                             fcn_args=(self.dataX[it0:it1]-self.dataX[time_zero],
                                       self.dataY[it0:it1]))
    self.fitresult = minner.minimize(method='least_squares', )
    # write error report
    lmfit.report_fit(self.fitresult)
PPF - Paired pulse facilitiation over several intervals; may include repeated trials Parameters ---------- datapath : path to the data protocol (Path or string) plot : boolean (default: True) Flag to control plotting of the data update_regions: Boolean (default: False) A flag that forces the routines to plot data so that a time window for the analysis can be defined and saved. """ self.datapath = datapath self.AR = acq4read.Acq4Read() # make our own private cersion of the analysis and reader self.plot = plot self.db = None self.db_filename = None self.update_regions = update_regions self.JunctionPotential = -8.0 * 1e-3 # junction potential for correction self.NMDA_voltage = 0.050 # in V positive self.AMPA_voltage = -0.0741 # in V - this is the Cl eq potential to minize GABA interference self.NMDA_delay = 0.050 # delay in s to make measurement def setup(self, clamps=None, spikes=None, baseline=[0, 0.001]): """ Set up for the fitting Parameters ---------- clamps : A datamodel structure (required) Brings the data to the module. This usually will be a PatchEphys object. spikes : A spikeAnalysis structure (optional) Has information about which traces have spikes Use this when analyzing events that may be contaminated by spikes baseline : list (2 elements) times over which baseline is measured (in seconds) """ if clamps is None: raise ValueError("VC analysis requires defined clamps ") self.Clamps = clamps self.spikes = spikes self.set_baseline_times(baseline) self.analysis_summary = {} # init the result structure def check_protocol(self, protocol): """ Verify that the protocol we are examining is complete. 
Returns True or False """ return(self.AR.checkProtocol(protocol)) def read_database(self, filename): """ Read the database that will be used for analysis The database is a pandas pickled file with columns date, protocol, T0 and T1 Parameters ---------- filename : str or Path The name of the database file (full path or file if in the current working directory) """ self.db_filename = Path(filename) if self.db_filename.is_file(): with(open(self.db_filename, 'rb')) as fh: self.db = pd.read_pickle(fh, compression=None) else: self.db = pd.DataFrame(columns=['date', 'protocol', 'T0', 'T1']) def update_database(self): """ Write the database """ if self.db is not None: self.db.to_pickle(self.db_filename) def measure_PSC(self, protocolName, plot=True, savetimes=False, ignore_important_flag=True): """ Direct the analysis Uses the beginning of the protocol name to select which analysis to use Parameters: protocolName : str Name of the protocol to analyze, underneath the datapath plot : boolean (default: True) Flag to plot data """ dp_s = str(self.datapath) date, name, cellname, proto, sliceid = self.AR.file_cell_protocol(dp_s) dk = list(self.AR.getIndex(dp_s).keys()) # if 'important' in dk: # print(str(Path(date, name, cellname, proto)), self.AR.getIndex(dp_s)['important']) # else: # print('No important flag in dk') # return False self.AR.setProtocol(self.datapath) # define the protocol path where the data is self.setup(clamps=self.AR) self.read_database(f"{protocolName:s}.p") if self.AR.getData(): # get that data. 
print('Protocol important: ', self.AR.protocol_important) if not self.AR.protocol_important and not ignore_important_flag: return False ok = False if protocolName.startswith('Stim_IO'): ok = self.analyze_IO() elif protocolName.startswith('VC-EPSC_3'): ok = self.analyze_VDEP() elif protocolName.startswith('PPF'): print('analyzing ppf') ok = self.analyze_PPF() if not ok: print('Failed on protocol in IV: ', self.datapath, protocolName) return False if plot: self.plot_vciv() if savetimes: date = make_key(self.datapath) if date not in self.db['date'].tolist(): self.db.loc[len(self.db)] = [date, protocolName, self.T0, self.T1] print('new date added') else: self.db.loc[date, 'date'] = date self.db.loc[date, 'protocol'] = protocolName self.db.loc[date, 'T0'] = self.T0 self.db.loc[date, 'T1'] = self.T1 print('old date data updated') self.update_database() # print('db head: ', self.db.head()) return True else: return False def get_stimtimes(self): """ This should get the stimulus times. Right now, it does nothing """ pass def set_baseline_times(self, baseline): """ baseline: 2-element list or numpy array """ if len(baseline) != 2: raise ValueError('Baseline must be a 2-element array') if isinstance(baseline, list): baseline = np.array(baseline) self.baseline = np.sort(baseline) def get_baseline(self): """ Return the mean values in the data over the baseline region. 
""" bl = self.mean_I_analysis(region=self.baseline, reps=[0]) return bl def analyze_IO(self, rmpregion=[0., 0.05], twidth=0.05, deadwin=0.001, protocolName=None, device='Stim0'): """Analyze in input=output relationship for a specific driving device """ pulse_train = self.AR.getStim(device) # get the stimulus information # stim dict in pulse_train will look like: # {'start': [0.05, 0.1], 'duration': [0.0001, 0.0001], # 'amplitude': [0.00025, 0.00025], 'npulses': [2], 'period': [0.05], 'type': ['pulseTrain']} # try: devicedata = self.AR.getDeviceData(device=device, devicename='command') if devicedata is None: print('No device data? name command, ', device) return False filekey = Path(make_key(self.datapath)) # check the db to see if we have parameters already dfiles = self.db['date'].tolist() if filekey in dfiles: delay = self.db.loc[filekey, 'date']['T0'] t1 = self.db.loc[filekey, 'date']['T1'] width = t1-delay else: delay = 1.0*1e-3 width = 15.0*1e-3 self.sign = -1 stim_io = self.AR.sequence[(device, 'command.PulseTrain_amplitude')] reps = self.AR.sequence[('protocol', 'repetitions')] Stim_IO = np.tile(stim_io, len(reps)) # stimuli in order self.analysis_summary[f'PSP_IO'] = [[]]*len(pulse_train['start']) # create space for amplitude results, per pulse self.analysis_summary[f'psc_stim_amplitudes'] = [[]]*len(pulse_train['start']) #
a TF 1.0 checkpoint in priority if from_tf archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint in priority if from_tf archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif from_flax and os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)): # Load from a Flax checkpoint in priority if from_flax archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError( f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index', FLAX_WEIGHTS_NAME]} found in " f"directory {pretrained_model_name_or_path} or `from_tf` and `from_flax` set to False." ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): if not from_tf: raise ValueError( f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set " "from_tf to True to load from this checkpoint." 
) archive_file = pretrained_model_name_or_path + ".index" else: # set correct filename if from_tf: filename = TF2_WEIGHTS_NAME elif from_flax: filename = FLAX_WEIGHTS_NAME else: filename = WEIGHTS_NAME archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=filename, revision=revision, mirror=mirror, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, ) except EnvironmentError as err: logger.error(err) msg = ( f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n" ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info(f"loading weights file {archive_file}") else: logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") else: resolved_archive_file = None # load pt weights early so that we know which dtype to init the model under if from_pt: if state_dict is None: try: state_dict = torch.load(resolved_archive_file, map_location="cpu") except Exception: raise OSError( f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' " f"at '{resolved_archive_file}'" "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. " ) # set dtype to instantiate the model under: # 1. If torch_dtype is not None, we use that dtype # 2. 
If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first # weights entry - we assume all weights are of the same dtype # we also may have config.torch_dtype available, but we won't rely on it till v5 dtype_orig = None if torch_dtype is not None: if isinstance(torch_dtype, str): if torch_dtype == "auto": torch_dtype = next(iter(state_dict.values())).dtype else: raise ValueError( f"`torch_dtype` can be either a `torch.dtype` or `auto`, but received {torch_dtype}" ) dtype_orig = cls._set_default_torch_dtype(torch_dtype) config.name_or_path = pretrained_model_name_or_path # Instantiate model. if is_deepspeed_zero3_enabled(): import deepspeed logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") # this immediately partitions the model across all gpus, to avoid the overhead in time # and memory copying it on CPU or each GPU first with deepspeed.zero.Init(config=deepspeed_config()): with no_init_weights(_enable=_fast_init): model = cls(config, *model_args, **model_kwargs) else: with no_init_weights(_enable=_fast_init): model = cls(config, *model_args, **model_kwargs) if from_pt: # restore default dtype if dtype_orig is not None: torch.set_default_dtype(dtype_orig) if from_tf: if resolved_archive_file.endswith(".index"): # Load from a TensorFlow 1.X checkpoint - provided by original authors model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' else: # Load from our TensorFlow 2.0 checkpoints try: from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True) except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." 
) raise elif from_flax: try: from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file) except ImportError: logger.error( "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see " "https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions." ) raise elif from_pt: model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_state_dict_into_model( model, state_dict, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes, _fast_init=_fast_init, ) # make sure token embedding weights are still tied if needed model.tie_weights() # Set model in evaluation mode to deactivate DropOut modules by default model.eval() if output_loading_info: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, "error_msgs": error_msgs, } return model, loading_info return model @classmethod def _load_state_dict_into_model( cls, model, state_dict, pretrained_model_name_or_path, ignore_mismatched_sizes=False, _fast_init=True ): # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # Retrieve missing & unexpected_keys model_state_dict = model.state_dict() expected_keys = list(model_state_dict.keys()) loaded_keys = list(state_dict.keys()) prefix = model.base_model_prefix has_prefix_module = any(s.startswith(prefix) for s in loaded_keys) expects_prefix_module = any(s.startswith(prefix) for s in expected_keys) # key re-naming operations are never done on the 
keys # that are loaded, but always on the keys of the newly initialized model remove_prefix = not has_prefix_module and expects_prefix_module add_prefix = has_prefix_module and not expects_prefix_module if remove_prefix: expected_keys = [".".join(s.split(".")[1:]) if s.startswith(prefix) else s for s in expected_keys] elif add_prefix: expected_keys = [".".join([prefix, s]) for s in expected_keys] missing_keys = list(set(expected_keys) - set(loaded_keys)) unexpected_keys = list(set(loaded_keys) - set(expected_keys)) # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not # matching the weights in the model. mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key in loaded_keys: model_key = checkpoint_key if remove_prefix and checkpoint_key.startswith(prefix): model_key = ".".join(checkpoint_key.split(".")[1:]) elif add_prefix: model_key = f"{prefix}.{checkpoint_key}" if ( model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape ): mismatched_keys.append( (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) ) del state_dict[checkpoint_key] # Some models may have keys that are not in the state by design, removing them before needlessly warning # the user. 
if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if _fast_init: # retrieve unintialized modules and initialize unintialized_modules = model.retrieve_modules_from_names( missing_keys, add_prefix=add_prefix, remove_prefix=remove_prefix ) for module in unintialized_modules: model._init_weights(module) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata error_msgs = [] # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. def load(module: nn.Module, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) if is_deepspeed_zero3_enabled(): import deepspeed # because zero3 puts placeholders in model params, this context # manager gathers (unpartitions) the params of the current layer, then loads from # the state dict and then re-partitions them again with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0): if torch.distributed.get_rank() == 0: module._load_from_state_dict(*args) else: module._load_from_state_dict(*args) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") # Make sure we are able to load base models as well as derived models (with heads) start_prefix = "" model_to_load = model if not hasattr(model, cls.base_model_prefix) and has_prefix_module: start_prefix = cls.base_model_prefix + "." 
if hasattr(model, cls.base_model_prefix) and not has_prefix_module: model_to_load = getattr(model, cls.base_model_prefix) load(model_to_load, prefix=start_prefix) if len(error_msgs) > 0: error_msg = "\n\t".join(error_msgs) raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when " f"initializing {model.__class__.__name__}: {unexpected_keys}\n" f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n" f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect " f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " f"and are newly
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack Foundation # Copyright 2012-2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. This test case reads the configuration file test_migrations.conf for database connection settings to use in the tests. For each connection found in the config file, the test case runs a series of test cases to ensure that migrations work properly both upgrading and downgrading, and that no data loss occurs if possible. There are also "opportunistic" tests for both mysql and postgresql in here, which allows testing against all 3 databases (sqlite in memory, mysql, pg) in a properly configured unit test environment. For the opportunistic testing you need to set up a db named 'openstack_citest' with user 'openstack_citest' and password '<PASSWORD>' on localhost. The test will then use that db and u/p combo to run the tests. 
For postgres on Ubuntu this can be done with the following commands: sudo -u postgres psql postgres=# create user openstack_citest with createdb login password '<PASSWORD>'; postgres=# create database openstack_citest with owner openstack_citest; """ import collections import commands import ConfigParser import datetime import os import urlparse import uuid from migrate.versioning import repository import netaddr import sqlalchemy from sqlalchemy.dialects import postgresql from sqlalchemy.dialects import sqlite import sqlalchemy.exc import nova.db.sqlalchemy.migrate_repo from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import test import nova.virt.baremetal.db.sqlalchemy.migrate_repo LOG = logging.getLogger(__name__) def _get_connect_string(backend, user, passwd, database): """ Try to get a connection with a very specific set of values, if we get these then we'll run the tests, otherwise they are skipped """ if backend == "postgres": backend = "postgresql+psycopg2" elif backend == "mysql": backend = "mysql+mysqldb" else: raise Exception("Unrecognized backend: '%s'" % backend) return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % locals()) def _is_backend_avail(backend, user, passwd, database): try: connect_uri = _get_connect_string(backend, user, passwd, database) engine = sqlalchemy.create_engine(connect_uri) connection = engine.connect() except Exception: # intentionally catch all to handle exceptions even if we don't # have any backend code loaded. 
def get_mysql_connection_info(conn_pieces):
    """Split a parsed mysql URL into (user, password, database, host).

    *conn_pieces* is a urlparse result for a URL of the form
    mysql://user:pass@host/database. The password is returned already
    formatted as a mysql command-line argument, -p"...", or '' when no
    password is present — mirroring how the callers splice it directly
    into a shell command.

    BUGFIX: the checked-in line was corrupted by an anonymization pass
    (`password = <PASSWORD>\\"" % ...`, a syntax error); restored to the
    standard -p"%s" CLI form.
    """
    database = conn_pieces.path.strip('/')
    loc_pieces = conn_pieces.netloc.split('@')
    host = loc_pieces[1]
    auth_pieces = loc_pieces[0].split(':')
    user = auth_pieces[0]
    password = ""
    if len(auth_pieces) > 1:
        if auth_pieces[1].strip():
            password = "-p\"%s\"" % auth_pieces[1]
    return (user, password, database, host)
""" def test_walk_versions(self): for key, engine in self.engines.items(): self._walk_versions(engine, self.snake_walk) def test_mysql_opportunistically(self): self._test_mysql_opportunistically() def test_mysql_connect_fail(self): """ Test that we can trigger a mysql connection failure and we fail gracefully to ensure we don't break people without mysql """ if _is_backend_avail('mysql', "openstack_cifail", self.PASSWD, self.DATABASE): self.fail("Shouldn't have connected") def test_postgresql_opportunistically(self): self._test_postgresql_opportunistically() def test_postgresql_connect_fail(self): """ Test that we can trigger a postgres connection failure and we fail gracefully to ensure we don't break people without postgres """ if _is_backend_avail('postgres', "openstack_cifail", self.PASSWD, self.DATABASE): self.fail("Shouldn't have connected") class BaseMigrationTestCase(test.TestCase): """Base class fort testing migrations and migration utils.""" USER = None PASSWD = None DATABASE = None def __init__(self, *args, **kwargs): super(BaseMigrationTestCase, self).__init__(*args, **kwargs) self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), 'test_migrations.conf') # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable # to override the location of the config file for migration testing self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF', self.DEFAULT_CONFIG_FILE) self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__ self.REPOSITORY = repository.Repository( os.path.abspath(os.path.dirname(self.MIGRATE_FILE))) self.INIT_VERSION = 0 self.snake_walk = False self.test_databases = {} self.migration = None self.migration_api = None def setUp(self): super(BaseMigrationTestCase, self).setUp() # Load test databases from the config file. Only do this # once. No need to re-run this on each test... 
        # Tail of setUp(): load per-backend connection URLs from the
        # test_migrations.conf file, then wipe all configured databases so
        # every test starts from a blank slate.
        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
        if os.path.exists(self.CONFIG_FILE_PATH):
            cp = ConfigParser.RawConfigParser()
            try:
                cp.read(self.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    # DEFAULT-section entries map backend name -> URL.
                    self.test_databases[key] = value
                self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
            except ConfigParser.ParsingError, e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(BaseMigrationTestCase, self).tearDown()

    def execute_cmd(self, cmd=None):
        # Run a shell command, log its output, and fail the test on a
        # non-zero exit status.
        status, output = commands.getstatusoutput(cmd)
        LOG.debug(output)
        self.assertEqual(0, status,
                         "Failed to run: %s\n%s" % (cmd, output))

    # Serialised across processes: concurrent create/drop of the same
    # PostgreSQL database would race.
    @lockutils.synchronized('pgadmin', 'nova-', external=True)
    def _reset_pg(self, conn_pieces):
        # NOTE: the local names (user, password, database, host) are
        # consumed later in this method via locals()-based string
        # interpolation -- do not rename them.
        (user, password, database, host) = \
            get_pgsql_connection_info(conn_pieces)
        os.environ['PGPASSWORD'] = password
        os.environ['PGUSER'] = user
        # note(boris-42): We must create and drop database, we can't
        # drop database which we have connected to, so for such
        # operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" " '%(sql)s' -d template1") sql = ("drop database if exists %(database)s;") % locals() droptable = sqlcmd % locals() self.execute_cmd(droptable) sql = ("create database %(database)s;") % locals() createtable = sqlcmd % locals() self.execute_cmd(createtable) os.unsetenv('PGPASSWORD') os.unsetenv('PGUSER') def _reset_databases(self): for key, engine in self.engines.items(): conn_string = self.test_databases[key] conn_pieces = urlparse.urlparse(conn_string) engine.dispose() if conn_string.startswith('sqlite'): # We can just delete the SQLite database, which is # the easiest and cleanest solution db_path = conn_pieces.path.strip('/') if os.path.exists(db_path): os.unlink(db_path) # No need to recreate the SQLite DB. SQLite will # create it for us if it's not there... elif conn_string.startswith('mysql'): # We can execute the MySQL client to destroy and re-create # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. (user, password, database, host) = \ get_mysql_connection_info(conn_pieces) sql = ("drop database if exists %(database)s; " "create database %(database)s;") % locals() cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s " "-e \"%(sql)s\"") % locals() self.execute_cmd(cmd) elif conn_string.startswith('postgresql'): self._reset_pg(conn_pieces) def _test_mysql_opportunistically(self): # Test that table creation on mysql only builds InnoDB tables if not _have_mysql(self.USER, self.PASSWD, self.DATABASE): self.skipTest("mysql not available") # add this to the global lists to make reset work with it, it's removed # automatically in tearDown so no need to clean it up here. 
connect_string = _get_connect_string("mysql", self.USER, self.PASSWD, self.DATABASE) (user, password, database, host) = \ get_mysql_connection_info(urlparse.urlparse(connect_string)) engine = sqlalchemy.create_engine(connect_string) self.engines[database] = engine self.test_databases[database] = connect_string # build a fully populated mysql database with all the tables self._reset_databases() self._walk_versions(engine, False, False) connection = engine.connect() # sanity check total = connection.execute("SELECT count(*) " "from information_schema.TABLES " "where TABLE_SCHEMA='%(database)s'" % locals()) self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?") noninnodb = connection.execute("SELECT count(*) " "from information_schema.TABLES " "where TABLE_SCHEMA='%(database)s' " "and ENGINE!='InnoDB' " "and TABLE_NAME!='migrate_version'" % locals()) count = noninnodb.scalar() self.assertEqual(count, 0, "%d non InnoDB tables created" % count) connection.close() def _test_postgresql_opportunistically(self): # Test postgresql database migration walk if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE): self.skipTest("postgresql not available") # add this to the global lists to make reset work with it, it's removed # automatically in tearDown so no need to clean it up here. connect_string = _get_connect_string("postgres", self.USER, self.PASSWD, self.DATABASE) engine = sqlalchemy.create_engine(connect_string) (user, password,
<reponame>movermeyer/madrona from django.contrib.gis.geos import GEOSGeometry, Polygon, LineString, Point, LinearRing, fromstr from django import forms from madrona.studyregion.models import * from django.conf import settings from madrona.common.utils import LargestPolyFromMulti, LargestLineFromMulti from django.template.loader import render_to_string from django.core.urlresolvers import reverse # manipulatorsDict is bound to this module (won't be reinitialized if module is imported twice) manipulatorsDict = {} from elementtree.ElementTree import fromstring from django.contrib.gis.geos import LinearRing, Polygon from madrona.common.utils import clean_geometry, ensure_clean def simplify(geom): if geom.srid != settings.GEOMETRY_DB_SRID: geom.transform(settings.GEOMETRY_DB_SRID) from django.db import connection cursor = connection.cursor() query = "select simplify(st_geomfromewkt(\'%s\'), %s) as geometry" % (geom.ewkt,settings.KML_SIMPLIFY_TOLERANCE) cursor.execute(query) row = cursor.fetchone() try: newgeom = fromstr(row[0]) newgeom.transform(settings.GEOMETRY_CLIENT_SRID) return newgeom except: raise Exception("KML_SIMPLIFY_TOLERANCE might be too high; simplify failed. 
Try setting the srid on the input geometry") def display_kml(geom): geom = simplify(geom) if hasattr(geom, 'shell'): coords = [] for coord in geom.shell.coords: coords.append(','.join([str(coord[0]), str(coord[1]), str(settings.KML_EXTRUDE_HEIGHT)])) coords = ' '.join(coords) geom_kml = """<Polygon> <extrude>1</extrude> <altitudeMode>absolute</altitudeMode> <outerBoundaryIs> <LinearRing> <coordinates>%s</coordinates> </LinearRing> </outerBoundaryIs> </Polygon> """ % (coords, ) else: geom_kml = geom.kml return """<?xml version="1.0" encoding="UTF-8"?> <kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom"> <Placemark> <Style> <LineStyle> <color>ffffffff</color> <width>2</width> </LineStyle> <PolyStyle> <color>8000ff00</color> </PolyStyle> </Style> %s </Placemark> </kml>""" % (geom_kml, ) def parsekmlpoly(kmlstring): e = fromstring(kmlstring) coords = coords = e.find('{http://www.opengis.net/kml/2.2}Placemark/{http://www.opengis.net/kml/2.2}Polygon/{http://www.opengis.net/kml/2.2}outerBoundaryIs/{http://www.opengis.net/kml/2.2}LinearRing/{http://www.opengis.net/kml/2.2}coordinates').text coords = coords.lstrip(' ').rstrip(' ').replace('\n', '').replace('\t', '') lra = [] for yxz in coords.split(' '): a = yxz.split(',') if len(a) > 1: lra.append((float(a[0]), float(a[1]))) lr = LinearRing(lra) poly = Polygon(lr) return poly def parsekmllinestring(kmlstring): e = fromstring(kmlstring) coords = coords = e.find('{http://www.opengis.net/kml/2.2}Placemark/{http://www.opengis.net/kml/2.2}LineString/{http://www.opengis.net/kml/2.2}coordinates').text coords = coords.lstrip(' ').rstrip(' ').replace('\n', '').replace('\t', '') lra = [] for yxz in coords.split(' '): a = yxz.split(',') if len(a) > 1: lra.append((float(a[0]), float(a[1]))) linestring = LineString(lra) return linestring def parsekmlpoint(kmlstring): e = fromstring(kmlstring) coords = coords = 
e.find('{http://www.opengis.net/kml/2.2}Placemark/{http://www.opengis.net/kml/2.2}Point/{http://www.opengis.net/kml/2.2}coordinates').text coords = coords.lstrip(' ').rstrip(' ').replace('\n', '').replace('\t', '') lra = [] for yxz in coords.split(' '): a = yxz.split(',') if len(a) > 1: lra.append((float(a[0]), float(a[1]))) point = Point(lra[0]) return point def parsekml(shape): if shape.find('Polygon') is not -1: return parsekmlpoly(shape) elif shape.find('LineString') is not -1: return parsekmllinestring(shape) else: # point return parsekmlpoint(shape) def iskml(string): return (string.rfind('kml') != -1) class BaseManipulator(object): ''' BaseManipulator should be used as the parent class to all manipulator classes. The manipulate() function should be overridden with suitable definition, it is this function that will be called automatically when your manipulator class is included in the Mpa.Options.manipulators list. This function generally takes as input a target shape geometry, and should return a call to result() containing the 'clipped_shape' and optionally a rendered template 'html' and 'success' value. 'clipped_shape' is the new shape as a result of the manipulator 'html' is generally a template that might be displayed by the client 'success' is a signal, '1' or '0', as to whether the manipulation succeeded or not The do_template() function can be used to render a template with appropriate context The target_to_valid_geom() function can be used to generate a geometry from target shape The result() function should be used for the manipulator return value to ensure that all necessary key/value pairs are provided. Three useful exceptions are provided as well: InternalException is used for exceptions or errors that are considered 'server-side' or 'out of the users control', such as failed database access, or failed geometry operation. 
InvalidGeometryException is used for exceptions or errors resulting from an innapropriate mpa geometry such as a point, line, or otherwise invalid geometry. HaltManipulations is used for errors, not already handled by InternalException or InvalidGeometryException, that should prevent any further manipulations from taking place. This could be useful in cases such as when an mpa geometry is outside of the study region. In such cases there is no need for further manipulations as such an mpa entry is already deemed inappropriate for our use. ''' def __init__(self, **kwargs): self.kwargs = kwargs def manipulate(self): raise NotImplementedError() def do_template(self, key, internal_message='', extra_context={}): context = {'MEDIA_URL':settings.MEDIA_URL, 'INTERNAL_MESSAGE': internal_message} context.update(extra_context) return render_to_string(self.Options.html_templates[key], context) def target_to_valid_geom(self, shape): try: if iskml(shape): target = parsekml(shape) else: target = GEOSGeometry(shape) except Exception, e: raise self.InvalidGeometryException(e.message) if not target.valid: target = target.buffer(0) if not target.valid: raise self.InvalidGeometryException() target.set_srid(settings.GEOMETRY_CLIENT_SRID) return target def result(self, clipped_shape, html="", success="1"): clipped_shape = ensure_clean(clipped_shape, settings.GEOMETRY_DB_SRID) return {"clipped_shape": clipped_shape, "html": html, "success": success} class Form: available = False class Options: name = 'Manipulator base class' template_name = 'manipulators/manipulator_default.html' html_templates = { 'invalid_geom':'manipulators/invalid_geometry.html', 'internal':'manipulators/internal_error.html', 'unexpected':'manipulators/unexpected_error.html' } class InternalException(Exception): def __init__(self, message="", status_html=None, success="0"): self._message = message if status_html == None: self.template = BaseManipulator.do_template(BaseManipulator(), 'internal', message) self.html = 
self.template else: self.html = status_html self.success = success def __str__(self): return repr(self._message) class InvalidGeometryException(Exception): def __init__(self, message="", status_html=None, success="0"): self._message = message if status_html == None: self.template = BaseManipulator.do_template(BaseManipulator(), 'invalid_geom', message) self.html = self.template else: self.html = status_html self.success = success def __str__(self): return repr(self._message) class HaltManipulations(Exception): def __init__(self, message="", status_html="", success="0"): self._message = message self.html = status_html self.success = success def __str__(self): return repr(self._message) class ClipToShapeManipulator(BaseManipulator): ''' required arguments: target_shape: GEOSGeometry of the shape to be clipped, in srid GEOMETRY_CLIENT_SRID (4326) clip_against: GEOSGeometry of the shape to clip against, in srid GEOMETRY_CLIENT_SRID (4326) zero: this value may be used to prevent issues that seem to arise from trying to simplify very small geometric results concerning **kwargs: kwargs is included to prevent errors resulting from extra arguments being passed to this manipulator from the generic view manipulate() return value: a call to self.result() with required parameter 'clipped_shape': The returned shape geometry should be in srid GEOMETRY_CLIENT_SRID (4326) The clipped shape will be the largest (in area) polygon result from intersecting 'target_shape' with 'clip_against' and optional parameters 'html' and 'success': The html is usually a template that will be displayed to the client, explaining the manipulation if not provided, this will remain empty The success parameter is defined as '1' for success and '0' for failure if not provided, the default value, '1', is used html_templates=='internal' This represents an 'internal error' and is accessed by raising a ManipulatorInternalException This should occur under the following circumstances: if geometry can not be 
generated from "clip_against" or intersection call failed clipped_shape will be returned as None html_templates=='invalid_geom' This represents a 'user error' and is accessed by raising an InvalidGeometryException This should occur under the following circumstances: if geometry can not be generated from "target_shape" or if "target_shape" is not a valid geometry clipped_shape will be returned as None html_templates==2 clipped shape is empty (no overlap with "clip_against") html_templates==0 if "target_shape" is successfully clipped to "clip_against" ''' def __init__(self, target_shape, clip_against=None, zero=0.0, **kwargs): self.target_shape = target_shape self.clip_against = clip_against self.zero = zero def manipulate(self): #extract target_shape geometry target_shape = self.target_to_valid_geom(self.target_shape) #extract clip_against geometry try: clip_against = GEOSGeometry(self.clip_against) clip_against.set_srid(settings.GEOMETRY_CLIENT_SRID) except Exception, e: raise self.InternalException("Exception raised in ClipToShapeManipulator while initializing geometry on self.clip_against: " + e.message) if not clip_against.valid: raise self.InternalException("ClipToShapeManipulator: 'clip_against' is not a valid geometry") #intersect the two geometries try: clipped_shape = target_shape.intersection(clip_against) except Exception, e: raise self.InternalException("Exception raised in ClipToShapeManipulator while intersecting geometries: " + e.message) #if there was no overlap (intersection was empty) if clipped_shape.area <= self.zero: status_html = self.do_template("2") message = "intersection resulted in empty geometry" # ALTERATION #1 #return self.result(clipped_shape, target_shape, status_html, message) raise self.HaltManipulations(message, status_html) # ALTERATION #2 #if there was overlap largest_poly = LargestPolyFromMulti(clipped_shape) status_html = self.do_template("0") #message = "'target_shape' was clipped successfully to 'clip_against'" #return 
self.result(largest_poly, target_shape, status_html, message) return self.result(largest_poly, status_html) ''' #the following is USED FOR TESTING, #assigns db current studyregion as the shape to clip against class Form(forms.Form): available = True target_shape = forms.CharField( widget=forms.HiddenInput ) clip_against = forms.CharField( widget=forms.HiddenInput, required=False ) def clean(self): data = self.cleaned_data #used for sandbox testing clippy = StudyRegion.objects.current().geometry clippy.transform(settings.GEOMETRY_CLIENT_SRID) data["clip_against"] = clippy.wkt #my_manipulator = ClipToShapeManipulator( **kwargs ) my_manipulator = ClipToShapeManipulator( data['target_shape'], data['clip_against'] ) self.manipulation = my_manipulator.manipulate() return self.cleaned_data ''' class Options: name = 'ClipToShape' html_templates = { '0':'manipulators/shape_clip.html', '2':'manipulators/outside_shape.html', } manipulatorsDict[ClipToShapeManipulator.Options.name] = ClipToShapeManipulator class DifferenceFromShapeManipulator(BaseManipulator): ''' required arguments: target_shape: GEOSGeometry of the shape to be clipped, in srid GEOMETRY_CLIENT_SRID (4326) clip_against: GEOSGeometry of the shape
main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\ intentState if not intentState: main.log.warn( "Not all intents installed" ) if intentState: break else: #Dumping intent summary main.log.info( "Intents:\n" + str( main.ONOScli1.intents( jsonFormat=False, summary=True ) ) ) utilities.assert_equals( expect=main.TRUE, actual=intentState, onpass="INTENTS INSTALLED", onfail="SOME INTENTS NOT INSTALLED" ) main.step("Verify flows are all added") for i in range( main.flowCheck ): if i != 0: main.log.warn( "verification failed. Retrying..." ) main.log.info( "Waiting for onos to add flows..." ) time.sleep( main.checkFlowsDelay ) flowState = main.TRUE for cli in main.CLIs: flowState = cli.checkFlowState() if not flowState: main.log.warn( "Not all flows added" ) if flowState: break else: #Dumping summary main.log.info( "Summary:\n" + str( main.ONOScli1.summary(jsonFormat=False) ) ) utilities.assert_equals( expect=main.TRUE, actual=flowState, onpass="FLOWS INSTALLED", onfail="SOME FLOWS NOT ADDED" ) main.step( "Verify Ping across all hosts" ) for i in range(main.numPings): time1 = time.time() pingResult = main.Mininet1.pingall(timeout=main.pingTimeout) if not pingResult: main.log.warn("First pingall failed. 
Retrying...") time.sleep(main.pingSleep) else: break time2 = time.time() timeDiff = round( ( time2 - time1 ), 2 ) main.log.report( "Time taken for Ping All: " + str( timeDiff ) + " seconds" ) caseResult = ( checkFlowsState and pingResult and intentState ) utilities.assert_equals( expect=main.TRUE, actual=caseResult, onpass="Install 25 multi to single point Intents and Ping All test PASS", onfail="Install 25 multi to single point Intents and Ping All test FAIL" ) if not intentState: main.log.debug( "Intents failed to install completely" ) if not pingResult: main.log.debug( "Pingall failed" ) if not checkFlowsState: main.log.debug( "Flows failed to add completely" ) if not caseResult and main.failSwitch: main.log.report("Stopping test") main.stop( email=main.emailOnStop ) def CASE94( self ): """ Install multi-single point intents and verify Ping all works for Chordal topology """ import copy import time main.log.report( "Install multi-single point intents and verify Ping all" ) main.log.report( "___________________________________________" ) main.case( "Install multi-single point intents and Ping all" ) deviceDPIDsCopy = copy.copy(main.deviceDPIDs) portIngressList = ['1']*(len(deviceDPIDsCopy) - 1) intentIdList = [] main.log.info( "MACsDict" + str(main.MACsDict) ) time1 = time.time() for i in xrange(0,len(deviceDPIDsCopy),int(main.numCtrls)): pool = [] for cli in main.CLIs: egressDevice = deviceDPIDsCopy[i] ingressDeviceList = copy.copy(deviceDPIDsCopy) ingressDeviceList.remove(egressDevice) if i >= len( deviceDPIDsCopy ): break t = main.Thread( target=cli.addMultipointToSinglepointIntent, threadID=main.threadID, name="addMultipointToSinglepointIntent", args =[ingressDeviceList,egressDevice,portIngressList,'1','','',main.MACsDict.get(egressDevice)]) pool.append(t) t.start() i = i + 1 main.threadID = main.threadID + 1 for thread in pool: thread.join() intentIdList.append(thread.result) time2 = time.time() main.log.info("Time for adding point intents: %2f seconds" 
%(time2-time1)) main.step("Verify intents are installed") # Giving onos multiple chances to install intents for i in range( main.intentCheck ): if i != 0: main.log.warn( "Verification failed. Retrying..." ) main.log.info("Waiting for onos to install intents...") time.sleep( main.checkIntentsDelay ) intentState = main.TRUE for e in range(int(main.numCtrls)): main.log.info( "Checking intents on CLI %s" % (e+1) ) intentState = main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\ intentState if not intentState: main.log.warn( "Not all intents installed" ) if intentState: break else: #Dumping intent summary main.log.info( "Intents:\n" + str( main.ONOScli1.intents( jsonFormat=False, summary=True ) ) ) utilities.assert_equals( expect=main.TRUE, actual=intentState, onpass="INTENTS INSTALLED", onfail="SOME INTENTS NOT INSTALLED" ) main.step("Verify flows are all added") for i in range( main.flowCheck ): if i != 0: main.log.warn( "verification failed. Retrying..." ) main.log.info( "Waiting for onos to add flows..." ) time.sleep( main.checkFlowsDelay ) flowState = main.TRUE for cli in main.CLIs: flowState = cli.checkFlowState() if not flowState: main.log.warn( "Not all flows added" ) if flowState: break else: #Dumping summary main.log.info( "Summary:\n" + str( main.ONOScli1.summary(jsonFormat=False) ) ) utilities.assert_equals( expect=main.TRUE, actual=flowState, onpass="FLOWS INSTALLED", onfail="SOME FLOWS NOT ADDED" ) main.step( "Verify Ping across all hosts" ) for i in range(main.numPings): time1 = time.time() pingResult = main.Mininet1.pingall(timeout=main.pingTimeout) if not pingResult: main.log.warn("First pingall failed. 
Retrying...") time.sleep(main.pingSleep) else: break time2 = time.time() timeDiff = round( ( time2 - time1 ), 2 ) main.log.report( "Time taken for Ping All: " + str( timeDiff ) + " seconds" ) caseResult = ( checkFlowsState and pingResult and intentState ) utilities.assert_equals( expect=main.TRUE, actual=caseResult, onpass="Install 25 multi to single point Intents and Ping All test PASS", onfail="Install 25 multi to single point Intents and Ping All test FAIL" ) if not intentState: main.log.debug( "Intents failed to install completely" ) if not pingResult: main.log.debug( "Pingall failed" ) if not checkFlowsState: main.log.debug( "Flows failed to add completely" ) if not caseResult and main.failSwitch: main.log.report("Stopping test") main.stop( email=main.emailOnStop ) def CASE95( self ): """ Install multi-single point intents and verify Ping all works for Spine topology """ import copy import time main.log.report( "Install multi-single point intents and verify Ping all" ) main.log.report( "___________________________________________" ) main.case( "Install multi-single point intents and Ping all" ) deviceDPIDsCopy = copy.copy(main.deviceDPIDs) portIngressList = ['1']*(len(deviceDPIDsCopy) - 1) intentIdList = [] main.log.info( "MACsDict" + str(main.MACsDict) ) time1 = time.time() for i in xrange(0,len(deviceDPIDsCopy),int(main.numCtrls)): pool = [] for cli in main.CLIs: egressDevice = deviceDPIDsCopy[i] ingressDeviceList = copy.copy(deviceDPIDsCopy) ingressDeviceList.remove(egressDevice) if i >= len( deviceDPIDsCopy ): break t = main.Thread( target=cli.addMultipointToSinglepointIntent, threadID=main.threadID, name="addMultipointToSinglepointIntent", args =[ingressDeviceList,egressDevice,portIngressList,'1','','',main.MACsDict.get(egressDevice)]) pool.append(t) t.start() i = i + 1 main.threadID = main.threadID + 1 for thread in pool: thread.join() intentIdList.append(thread.result) time2 = time.time() main.log.info("Time for adding point intents: %2f seconds" 
%(time2-time1)) main.step("Verify intents are installed") # Giving onos multiple chances to install intents for i in range( main.intentCheck ): if i != 0: main.log.warn( "Verification failed. Retrying..." ) main.log.info("Waiting for onos to install intents...") time.sleep( main.checkIntentsDelay ) intentState = main.TRUE for e in range(int(main.numCtrls)): main.log.info( "Checking intents on CLI %s" % (e+1) ) intentState = main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\ intentState if not intentState: main.log.warn( "Not all intents installed" ) if intentState: break else: #Dumping intent summary main.log.info( "Intents:\n" + str( main.ONOScli1.intents( jsonFormat=False, summary=True ) ) ) utilities.assert_equals( expect=main.TRUE, actual=intentState, onpass="INTENTS INSTALLED", onfail="SOME INTENTS NOT INSTALLED" ) main.step("Verify flows are all added") for i in range( main.flowCheck ): if i != 0: main.log.warn( "verification failed. Retrying..." ) main.log.info( "Waiting for onos to add flows..." ) time.sleep( main.checkFlowsDelay ) flowState = main.TRUE for cli in main.CLIs: flowState = cli.checkFlowState() if not flowState: main.log.warn( "Not all flows added" ) if flowState: break else: #Dumping summary main.log.info( "Summary:\n" + str( main.ONOScli1.summary(jsonFormat=False) ) ) utilities.assert_equals( expect=main.TRUE, actual=flowState, onpass="FLOWS INSTALLED", onfail="SOME FLOWS NOT ADDED" ) main.step( "Verify Ping across all hosts" ) for i in range(main.numPings): time1 = time.time() pingResult = main.Mininet1.pingall(timeout=main.pingTimeout) if not pingResult: main.log.warn("First pingall failed. 
Retrying...") time.sleep(main.pingSleep) else: break time2 = time.time() timeDiff = round( ( time2 - time1 ), 2 ) main.log.report( "Time taken for Ping All: " + str( timeDiff ) + " seconds" ) caseResult = ( checkFlowsState and pingResult and intentState ) utilities.assert_equals( expect=main.TRUE, actual=caseResult, onpass="Install 25 multi to single point Intents and Ping All test PASS", onfail="Install 25 multi to single point Intents and Ping All test FAIL" ) if not intentState: main.log.debug( "Intents failed to install completely" ) if not pingResult: main.log.debug( "Pingall failed" ) if not checkFlowsState: main.log.debug( "Flows failed to add completely" ) if not caseResult and main.failSwitch: main.log.report("Stopping test") main.stop( email=main.emailOnStop ) def CASE96( self ): """ Install single-multi point intents and verify Ping all works for att topology """ import copy main.log.report( "Install single-multi point intents and verify Ping all" ) main.log.report( "___________________________________________" ) main.case( "Install single-multi point intents and Ping all" ) deviceDPIDsCopy = copy.copy(main.deviceDPIDs) portEgressList = ['1']*(len(deviceDPIDsCopy) - 1) intentIdList = [] main.log.info( "MACsDict" + str(main.MACsDict) ) time1 = time.time() for i in xrange(0,len(deviceDPIDsCopy),int(main.numCtrls)): pool = [] for cli in main.CLIs: ingressDevice = deviceDPIDsCopy[i] egressDeviceList = copy.copy(deviceDPIDsCopy) egressDeviceList.remove(ingressDevice) if i >= len( deviceDPIDsCopy ): break t = main.Thread( target=cli.addSinglepointToMultipointIntent, threadID=main.threadID, name="addSinglepointToMultipointIntent", args =[ingressDevice,egressDeviceList,'1',portEgressList,'',main.MACsDict.get(ingressDevice)]) pool.append(t) t.start() i = i + 1 main.threadID = main.threadID + 1 for thread in pool: thread.join() intentIdList.append(thread.result) time2 = time.time() main.log.info("Time for adding point intents: %2f seconds" %(time2-time1)) 
main.step("Verify intents are installed") # Giving onos multiple chances to install intents for i in range( main.intentCheck ): if i != 0: main.log.warn( "Verification failed. Retrying..." ) main.log.info("Waiting for onos to install intents...") time.sleep( main.checkIntentsDelay ) intentState = main.TRUE for e in range(int(main.numCtrls)): main.log.info( "Checking intents on CLI %s" % (e+1) ) intentState = main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\ intentState if not intentState:
# coding: utf-8 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Utility classes.""" import logging import math import os import sys import time import numpy as np import mxnet as mx from mxnet import nd from mxnet.gluon import rnn, contrib from .data import ParserVocabulary from .tarjan import Tarjan class Progbar(object): """Progbar class copied from keras (https://github.com/fchollet/keras/) Displays a progress bar. Small edit : added strict arg to update Parameters ---------- target : int Total number of steps expected. width : int Progress bar width. verbose : int Verbosity level. Options are 1 and 2. """ def __init__(self, target, width=30, verbose=1): self.width = width self.target = target self.sum_values = {} self.unique_values = [] self.start = time.time() self.total_width = 0 self.seen_so_far = 0 self.verbose = verbose def update(self, current, values=None, exact=None, strict=None): """ Updates the progress bar. Parameters ---------- current : int Index of current step. values : List of tuples (name, value_for_last_step). The progress bar will display averages for these values. exact : List of tuples (name, value_for_last_step). The progress bar will display these values directly. 
""" values = values or [] exact = exact or [] strict = strict or [] for k, v in values: if k not in self.sum_values: self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far] self.unique_values.append(k) else: self.sum_values[k][0] += v * (current - self.seen_so_far) self.sum_values[k][1] += (current - self.seen_so_far) for cells in exact: k, v, w = cells[0], cells[1], 4 if len(cells) == 3: w = cells[2] if k not in self.sum_values: self.unique_values.append(k) self.sum_values[k] = [v, 1, w] for k, v in strict: if k not in self.sum_values: self.unique_values.append(k) self.sum_values[k] = v self.seen_so_far = current now = time.time() if self.verbose == 1: prev_total_width = self.total_width sys.stdout.write('\b' * prev_total_width) sys.stdout.write('\r') numdigits = 0 if self.target == 0 or math.isnan(self.target) \ else int(np.floor(np.log10(self.target))) + 1 barstr = '%%%dd/%%%dd [' % (numdigits, numdigits) bar = barstr % (current, self.target) prog = 0 if self.target == 0 else float(current) / self.target prog_width = int(self.width * prog) if prog_width > 0: bar += ('=' * (prog_width - 1)) if current < self.target: bar += '>' else: bar += '=' bar += ('.' * (self.width - prog_width)) bar += ']' sys.stdout.write(bar) self.total_width = len(bar) if current: time_per_unit = (now - self.start) / current else: time_per_unit = 0 eta = time_per_unit * (self.target - current) info = '' if current < self.target: info += ' - ETA: %ds' % eta else: info += ' - %ds' % (now - self.start) for k in self.unique_values: if isinstance(self.sum_values[k], list): info += (' - %s: %.' 
+ str(self.sum_values[k][2]) + 'f') % ( k, self.sum_values[k][0] / max(1, self.sum_values[k][1])) else: info += ' - %s: %s' % (k, self.sum_values[k]) self.total_width += len(info) if prev_total_width > self.total_width: info += ((prev_total_width - self.total_width) * ' ') sys.stdout.write(info) sys.stdout.flush() if current >= self.target: sys.stdout.write('\n') if self.verbose == 2: if current >= self.target: info = '%ds' % (now - self.start) for k in self.unique_values: info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1])) sys.stdout.write(info + '\n') def add(self, n, values=None): values = values or [] self.update(self.seen_so_far + n, values) def mxnet_prefer_gpu(): """If gpu available return gpu, else cpu Returns ------- context : Context The preferable GPU context. """ gpu = int(os.environ.get('MXNET_GPU', default=0)) if gpu in mx.test_utils.list_gpus(): return mx.gpu(gpu) return mx.cpu() def init_logger(root_dir, name='train.log'): """Initialize a logger Parameters ---------- root_dir : str directory for saving log name : str name of logger Returns ------- logger : logging.Logger a logger """ os.makedirs(root_dir, exist_ok=True) log_formatter = logging.Formatter('%(message)s') logger = logging.getLogger(name) file_handler = logging.FileHandler('{0}/{1}'.format(root_dir, name), mode='w') file_handler.setFormatter(log_formatter) logger.addHandler(file_handler) console_handler = logging.StreamHandler() console_handler.setFormatter(log_formatter) logger.addHandler(console_handler) logger.setLevel(logging.INFO) return logger def orthonormal_VanillaLSTMBuilder(lstm_layers, input_dims, lstm_hiddens, dropout_h=0., debug=False): """Build a standard LSTM cell, with variational dropout, with weights initialized to be orthonormal (https://arxiv.org/abs/1312.6120) Parameters ---------- lstm_layers : int Currently only support one layer input_dims : int word vector dimensions lstm_hiddens : int hidden size dropout_h : float dropout on hidden 
states debug : bool set to True to skip orthonormal initialization Returns ------- lstm_cell : VariationalDropoutCell A LSTM cell """ assert lstm_layers == 1, 'only accept one layer lstm' W = orthonormal_initializer(lstm_hiddens, lstm_hiddens + input_dims, debug) W_h, W_x = W[:, :lstm_hiddens], W[:, lstm_hiddens:] b = nd.zeros((4 * lstm_hiddens,)) b[lstm_hiddens:2 * lstm_hiddens] = -1.0 lstm_cell = rnn.LSTMCell(input_size=input_dims, hidden_size=lstm_hiddens, i2h_weight_initializer=mx.init.Constant(np.concatenate([W_x] * 4, 0)), h2h_weight_initializer=mx.init.Constant(np.concatenate([W_h] * 4, 0)), h2h_bias_initializer=mx.init.Constant(b)) wrapper = contrib.rnn.VariationalDropoutCell(lstm_cell, drop_states=dropout_h) return wrapper def biLSTM(f_lstm, b_lstm, inputs, dropout_x=0.): """Feature extraction through BiLSTM Parameters ---------- f_lstm : VariationalDropoutCell Forward cell b_lstm : VariationalDropoutCell Backward cell inputs : NDArray seq_len x batch_size dropout_x : float Variational dropout on inputs Returns ------- outputs : NDArray Outputs of BiLSTM layers, seq_len x 2 hidden_dims x batch_size """ for f, b in zip(f_lstm, b_lstm): inputs = nd.Dropout(inputs, dropout_x, axes=[0]) # important for variational dropout fo, _ = f.unroll(length=inputs.shape[0], inputs=inputs, layout='TNC', merge_outputs=True) bo, _ = b.unroll(length=inputs.shape[0], inputs=inputs.flip(axis=0), layout='TNC', merge_outputs=True) f.reset() b.reset() inputs = nd.concat(fo, bo.flip(axis=0), dim=2) return inputs def leaky_relu(x): """slope=0.1 leaky ReLu Parameters ---------- x : NDArray Input Returns ------- y : NDArray y = x > 0 ? 
x : 0.1 * x """ return nd.LeakyReLU(x, slope=.1) def bilinear(x, W, y, input_size, seq_len, batch_size, num_outputs=1, bias_x=False, bias_y=False): """Do xWy Parameters ---------- x : NDArray (input_size x seq_len) x batch_size W : NDArray (num_outputs x ny) x nx y : NDArray (input_size x seq_len) x batch_size input_size : int input dimension seq_len : int sequence length batch_size : int batch size num_outputs : int number of outputs bias_x : bool whether concat bias vector to input x bias_y : bool whether concat bias vector to input y Returns ------- output : NDArray [seq_len_y x seq_len_x if output_size == 1 else seq_len_y x num_outputs x seq_len_x] x batch_size """ if bias_x: x = nd.concat(x, nd.ones((1, seq_len, batch_size)), dim=0) if bias_y: y = nd.concat(y, nd.ones((1, seq_len, batch_size)), dim=0) ny = input_size + bias_y # W: (num_outputs x ny) x nx lin = nd.dot(W, x) if num_outputs > 1: lin = reshape_fortran(lin, (ny, num_outputs * seq_len, batch_size)) y = y.transpose([2, 1, 0]) # May cause performance issues lin = lin.transpose([2, 1, 0]) blin = nd.batch_dot(lin, y, transpose_b=True) blin = blin.transpose([2, 1, 0]) if num_outputs > 1: blin = reshape_fortran(blin, (seq_len, num_outputs, seq_len, batch_size)) return blin def orthonormal_initializer(output_size, input_size, debug=False): """adopted from <NAME> https://github.com/tdozat/Parser/blob/master/lib/linalg.py Parameters ---------- output_size : int input_size : int debug : bool Whether to skip this initializer Returns ------- Q : np.ndarray The orthonormal weight matrix of input_size x output_size """ print((output_size, input_size)) if debug: Q = np.random.randn(input_size, output_size) / np.sqrt(output_size) return np.transpose(Q.astype(np.float32)) I = np.eye(output_size) lr = .1 eps = .05 / (output_size + input_size) success = False tries = 0 while not success and tries < 10: Q = np.random.randn(input_size, output_size) / np.sqrt(output_size) for _ in range(100): QTQmI = Q.T.dot(Q) - I loss 
= np.sum(QTQmI ** 2 / 2) Q2 = Q ** 2 Q -= lr * Q.dot(QTQmI) / ( np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps) if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss): tries += 1 lr /= 2 break success = True if success: print(('Orthogonal pretrainer loss: %.2e' % loss)) else: print('Orthogonal pretrainer failed, using non-orthogonal random matrix') Q = np.random.randn(input_size, output_size) / np.sqrt(output_size) return np.transpose(Q.astype(np.float32)) def arc_argmax(parse_probs, length, tokens_to_keep, ensure_tree=True): """MST Adopted from <NAME> https://github.com/tdozat/Parser/blob/master/lib/models/nn.py Parameters ----------
3: 153, 4: 153, 5: 2, } self.assertEqual(expected, { d['id']: d['parts'] for d in rb._iter_devs()}) # ... balance is a little lumpy on the small guy since he wants # one and a half parts :\ expected = { 0: 0.4609375000000142, 1: -0.1914062499999858, 2: -0.1914062499999858, 3: -0.1914062499999858, 4: -0.1914062499999858, 5: 30.46875, } self.assertEqual(expected, rb._build_balance_per_dev()) self.assertEqual(rb.get_balance(), 30.46875) # increasing overload moves towards one replica in each tier rb.set_overload(0.5) expected = { 0: 0.5232035928143712, 1: 0.5232035928143712, 2: 0.5232035928143712, 3: 0.5232035928143712, 4: 0.8982035928143712, 5: 0.008982035928143714, } target_replicas = rb._build_target_replicas_by_tier() self.assertEqual(expected, {t[-1]: r for (t, r) in target_replicas.items() if len(t) == 4}) # ... and as always increasing overload makes balance *worse* rb.rebalance(seed=17) self.assertEqual(rb.get_balance(), 95.703125) # but despite the overall trend toward imbalance, the little guy # isn't really taking on many new parts! expected = { 0: 134, 1: 134, 2: 134, 3: 133, 4: 230, 5: 3, } self.assertEqual(expected, { d['id']: d['parts'] for d in rb._iter_devs()}) # *see*, at everyone's balance is getting worse *together*! 
expected = { 0: -12.585937499999986, 1: -12.585937499999986, 2: -12.585937499999986, 3: -13.238281249999986, 4: 50.0390625, 5: 95.703125, } self.assertEqual(expected, rb._build_balance_per_dev()) def test_two_servers_with_more_than_one_replica(self): rb = ring.RingBuilder(8, 3, 0) # z0 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sda', 'weight': 60}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sda', 'weight': 60}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.3', 'port': 6200, 'device': 'sda', 'weight': 60}) # z1 rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', 'port': 6200, 'device': 'sda', 'weight': 80}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', 'port': 6200, 'device': 'sda', 'weight': 128}) # z2 rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.1', 'port': 6200, 'device': 'sda', 'weight': 80}) rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'ip': '127.0.2.2', 'port': 6200, 'device': 'sda', 'weight': 240}) rb.set_overload(0.1) rb.rebalance() self.assertEqual(12.161458333333343, rb.get_balance()) replica_plan = rb._build_target_replicas_by_tier() for dev in rb._iter_devs(): tier = (dev['region'], dev['zone'], dev['ip'], dev['id']) expected_parts = replica_plan[tier] * rb.parts self.assertAlmostEqual(dev['parts'], expected_parts, delta=1) def test_multi_zone_with_failed_device(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sdb', 'weight': 2000}) rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sdb', 'weight': 2000}) rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.0.3', 
'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.0.3', 'port': 6200, 'device': 'sdb', 'weight': 2000}) # sanity, balanced and dispersed expected = { (0, 0): 1.0, (0, 1): 1.0, (0, 2): 1.0, } weighted_replicas = rb._build_weighted_replicas_by_tier() self.assertEqual(expected, {tier: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 2}) wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, {tier: weighted for (tier, weighted) in wanted_replicas.items() if len(tier) == 2}) self.assertEqual(rb.get_required_overload(), 0.0) # fail a device in zone 2 rb.remove_dev(4) expected = { 0: 0.6, 1: 0.6, 2: 0.6, 3: 0.6, 5: 0.6, } weighted_replicas = rb._build_weighted_replicas_by_tier() self.assertEqual(expected, {tier[3]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 4}) expected = { 0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5, 5: 1.0, } wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, {tier[3]: weighted for (tier, weighted) in wanted_replicas.items() if len(tier) == 4}) # does this make sense? every zone was holding 1/3rd of the # replicas, so each device was 1/6th, remove a device and # suddenly it's holding *both* sixths which is 2/3rds? 
self.assertAlmostEqual(rb.get_required_overload(), 2.0 / 3.0) # 10% isn't nearly enough rb.set_overload(0.1) target_replicas = rb._build_target_replicas_by_tier() expected = { 0: 0.585, 1: 0.585, 2: 0.585, 3: 0.585, 5: 0.6599999999999999, } self.assertEqual(expected, {tier[3]: weighted for (tier, weighted) in target_replicas.items() if len(tier) == 4}) # 50% isn't even enough rb.set_overload(0.5) target_replicas = rb._build_target_replicas_by_tier() expected = { 0: 0.525, 1: 0.525, 2: 0.525, 3: 0.525, 5: 0.8999999999999999, } self.assertEqual(expected, {tier[3]: weighted for (tier, weighted) in target_replicas.items() if len(tier) == 4}) # even 65% isn't enough (but it's getting closer) rb.set_overload(0.65) target_replicas = rb._build_target_replicas_by_tier() expected = { 0: 0.5025000000000001, 1: 0.5025000000000001, 2: 0.5025000000000001, 3: 0.5025000000000001, 5: 0.99, } self.assertEqual(expected, {tier[3]: weighted for (tier, weighted) in target_replicas.items() if len(tier) == 4}) def test_balanced_zones_unbalanced_servers(self): rb = ring.RingBuilder(8, 3, 1) # zone 0 server 127.0.0.1 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sda', 'weight': 3000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sdb', 'weight': 3000}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sda', 'weight': 3000}) # zone 1 server 127.0.0.2 rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sda', 'weight': 4000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sdb', 'weight': 4000}) # zone 1 (again) server 127.0.0.3 rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3', 'port': 6200, 'device': 'sda', 'weight': 1000}) weighted_replicas = rb._build_weighted_replicas_by_tier() # zones are evenly weighted expected = { (0, 0): 1.5, (0, 1): 1.5, } self.assertEqual(expected, 
{tier: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 2}) # ... but servers are not expected = { '127.0.0.1': 1.5, '127.0.0.2': 1.3333333333333333, '127.0.0.3': 0.16666666666666666, } self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 3}) # make sure wanted will even it out expected = { '127.0.0.1': 1.5, '127.0.0.2': 1.0, '127.0.0.3': 0.4999999999999999, } wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in wanted_replicas.items() if len(tier) == 3}) # so it wants 1/6th and eats 1/2 - that's 2/6ths more than it # wants which is a 200% increase self.assertAlmostEqual(rb.get_required_overload(), 2.0) # the overload doesn't effect the tiers that are already dispersed rb.set_overload(1) target_replicas = rb._build_target_replicas_by_tier() expected = { '127.0.0.1': 1.5, # notice with half the overload 1/6th replicanth swapped servers '127.0.0.2': 1.1666666666666665, '127.0.0.3': 0.3333333333333333, } self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in target_replicas.items() if len(tier) == 3}) def test_adding_second_zone(self): rb = ring.RingBuilder(3, 3, 1) # zone 0 server 127.0.0.1 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sdb', 'weight': 2000}) # zone 0 server 127.0.0.2 rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', 'port': 6200, 'device': 'sdb', 'weight': 2000}) # zone 0 server 127.0.0.3 rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'ip': '127.0.0.3', 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'ip': '127.0.0.3', 'port': 6200, 'device': 'sdb', 'weight': 
2000}) # sanity, balanced and dispersed expected = { '127.0.0.1': 1.0, '127.0.0.2': 1.0, '127.0.0.3': 1.0, } weighted_replicas = rb._build_weighted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 3}) wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in wanted_replicas.items() if len(tier) == 3}) self.assertEqual(rb.get_required_overload(), 0) # start adding a second zone # zone 1 server 127.0.1.1 rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', 'port': 6200, 'device': 'sdb', 'weight': 100}) # zone 1 server 127.0.1.2 rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', 'port': 6200, 'device': 'sdb', 'weight': 100}) # zone 1 server 127.0.1.3 rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'ip': '127.0.1.3', 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 11, 'region': 0, 'zone': 1, 'ip': '127.0.1.3', 'port': 6200, 'device': 'sdb', 'weight': 100}) # this messes things up pretty royally expected = { '127.0.0.1': 0.9523809523809523, '127.0.0.2': 0.9523809523809523, '127.0.0.3': 0.9523809523809523, '127.0.1.1': 0.047619047619047616, '127.0.1.2': 0.047619047619047616, '127.0.1.3': 0.047619047619047616, } weighted_replicas = rb._build_weighted_replicas_by_tier() self.assertEqual(expected, {tier[2]: weighted for (tier, weighted) in weighted_replicas.items() if len(tier) == 3}) expected = { '127.0.0.1': 0.6666666666666667, '127.0.0.2': 0.6666666666666667, '127.0.0.3': 0.6666666666666667, '127.0.1.1': 0.3333333333333333, '127.0.1.2': 0.3333333333333333, '127.0.1.3': 0.3333333333333333, } wanted_replicas = rb._build_wanted_replicas_by_tier() self.assertEqual(expected, 
{tier[2]: weighted for (tier, weighted) in wanted_replicas.items() if len(tier) == 3}) # so dispersion would require these devices hold 6x more than # prescribed by weight, defeating
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.core.help_files import helps # pylint: disable=line-too-long, too-many-lines helps['monitor'] = """ type: group short-summary: Manage the Azure Monitor Service. """ # region Alerts helps['monitor alert'] = """ type: group short-summary: Manage metric-based alert rules. """ helps['monitor alert create'] = """ type: command short-summary: Create a metric-based alert rule. parameters: - name: --action -a short-summary: Add an action to fire when the alert is triggered. long-summary: | Usage: --action TYPE KEY [ARG ...] Email: --action email <EMAIL> <EMAIL> Webhook: --action webhook https://www.contoso.com/alert apiKey=value Webhook: --action webhook https://www.contoso.com/alert?apiKey=value Multiple actions can be specified by using more than one `--action` argument. - name: --description short-summary: Free-text description of the rule. Defaults to the condition expression. - name: --disabled short-summary: Create the rule in a disabled state. - name: --condition short-summary: The condition which triggers the rule. long-summary: > The form of a condition is "METRIC {>,>=,<,<=} THRESHOLD {avg,min,max,total,last} PERIOD". Values for METRIC and appropriate THRESHOLD values can be obtained from `az monitor metric` commands, and PERIOD is of the form "##h##m##s". - name: --email-service-owners short-summary: Email the service owners if an alert is triggered. examples: - name: Create a high CPU usage alert on a VM with no actions. text: > az monitor alert create -n rule1 -g {RG} --target {VM ID} --condition "Percentage CPU > 90 avg 5m" - name: Create a high CPU usage alert on a VM with email and webhook actions. 
text: | az monitor alert create -n rule1 -g {RG} --target {VM ID} \\ --condition "Percentage CPU > 90 avg 5m" \\ --action email <EMAIL> <EMAIL> --email-service-owners \\ --action webhook https://www.contoso.com/alerts?type=HighCPU \\ --action webhook https://alerts.contoso.com apiKey={KEY} type=HighCPU """ helps['monitor alert update'] = """ type: command short-summary: Update a metric-based alert rule. parameters: - name: --target short-summary: ID of the resource to target for the alert rule. - name: --description short-summary: Description of the rule. - name: --condition short-summary: The condition which triggers the rule. long-summary: > The form of a condition is "METRIC {>,>=,<,<=} THRESHOLD {avg,min,max,total,last} PERIOD". Values for METRIC and appropriate THRESHOLD values can be obtained from `az monitor metric` commands, and PERIOD is of the form "##h##m##s". - name: --add-action -a short-summary: Add an action to fire when the alert is triggered. long-summary: | Usage: --add-action TYPE KEY [ARG ...] Email: --add-action email <EMAIL> <EMAIL> Webhook: --add-action webhook https://www.contoso.com/alert apiKey=value Webhook: --add-action webhook https://www.contoso.com/alert?apiKey=value Multiple actions can be specified by using more than one `--add-action` argument. - name: --remove-action -r short-summary: Remove one or more actions. long-summary: | Usage: --remove-action TYPE KEY [KEY ...] Email: --remove-action email <EMAIL> <EMAIL> Webhook: --remove-action webhook https://contoso.com/alert https://alerts.contoso.com - name: --email-service-owners short-summary: Email the service owners if an alert is triggered. - name: --metric short-summary: Name of the metric to base the rule on. populator-commands: - az monitor metrics list-definitions - name: --operator short-summary: How to compare the metric against the threshold. - name: --threshold short-summary: Numeric threshold at which to trigger the alert. 
- name: --aggregation short-summary: Type of aggregation to apply based on --period. - name: --period short-summary: > Time span over which to apply --aggregation, in nDnHnMnS shorthand or full ISO8601 format. """ helps['monitor alert delete'] = """ type: command short-summary: Delete an alert rule. """ helps['monitor alert list'] = """ type: command short-summary: List alert rules in a resource group. """ helps['monitor alert show'] = """ type: command short-summary: Show an alert rule. """ helps['monitor alert show-incident'] = """ type: command short-summary: Get the details of an alert rule incident. """ helps['monitor alert list-incidents'] = """ type: command short-summary: List all incidents for an alert rule. """ # endregion # region Metrics helps['monitor metrics'] = """ type: group short-summary: View Azure resource metrics. """ helps['monitor metrics list'] = """ type: command short-summary: List metric values for a resource. """ helps['monitor metrics list-definitions'] = """ type: command short-summary: List metric definitions for a resource. """ # endregion helps['monitor log-profiles'] = """ type: group short-summary: Manage log profiles. """ helps['monitor log-profiles update'] = """ type: command short-summary: Update a log profile. """ helps['monitor diagnostic-settings'] = """ type: group short-summary: Manage service diagnostic settings. """ helps['monitor diagnostic-settings create'] = """ type: command short-summary: Create diagnostic settings for the specified resource. parameters: - name: --resource-id type: string short-summary: The identifier of the resource. - name: --resource-group -g type: string short-summary: Name of the resource group. - name: --logs type: string short-summary: JSON encoded list of logs settings. Use @{file} to load from a file. - name: --metrics type: string short-summary: JSON encoded list of metric settings. Use @{file} to load from a file. 
- name: --storage-account type: string short-summary: Name or ID of the storage account to send diagnostic logs to. - name: --namespace type: string short-summary: Name or ID of the Service Bus namespace. - name: --rule-name type: string short-summary: Name of the Service Bus authorization rule. - name: --workspace type: string short-summary: Name or ID of the Log Analytics workspace to send diagnostic logs to. - name: --tags short-summary: Space separated tags in 'key[=value]' format. Use '' to clear existing tags """ helps['monitor diagnostic-settings update'] = """ type: command short-summary: Update diagnostic settings. """ helps['monitor autoscale-settings'] = """ type: group short-summary: Manage autoscale settings. """ helps['monitor autoscale-settings update'] = """ type: command short-summary: Updates an autoscale setting. """ helps['monitor activity-log'] = """ type: group short-summary: Manage activity logs. """ helps['monitor action-group'] = """ type: group short-summary: Manage action groups """ helps['monitor action-group list'] = """ type: command short-summary: List action groups under a resource group or the current subscription parameters: - name: --resource-group -g type: string short-summary: Name of the resource group under which the action groups are being listed. If it is omitted, all the action groups under the current subscription are listed. """ helps['monitor action-group show'] = """ type: command short-summary: Show the details of an action group """ helps['monitor action-group create'] = """ type: command short-summary: Create a new action group parameters: - name: --action -a short-summary: Add receivers to the action group during the creation long-summary: | Usage: --action TYPE NAME [ARG ...] Email: --action email bob <EMAIL> SMS: --action sms charli 1 5551234567 Webhook: --action webhook alert_hook https://www.contoso.com/alert Multiple actions can be specified by using more than one `--action` argument. 
- name: --short-name short-summary: The short name of the action group """ helps['monitor action-group update'] = """ type: command short-summary: Update an action group parameters: - name: --short-name short-summary: Update the group short name of the action group - name: --add-action -a short-summary: Add receivers to the action group long-summary: | Usage: --add-action TYPE NAME [ARG ...] Email: --add-action email bob <EMAIL> SMS: --add-action sms charli 1 5551234567 Webhook: --add-action https://www.contoso.com/alert Multiple actions can be specified by using more than one `--add-action` argument. - name: --remove-action -r short-summary: Remove receivers from the action group. Accept space separated list of receiver names. """ helps['monitor activity-log alert'] = """ type: group short-summary: Manage activity log alerts """ helps['monitor activity-log alert list'] = """ type: command short-summary: List activity log alerts under a resource group or the current subscription. parameters: - name: --resource-group -g short-summary: Name of the resource group under which the activity log alerts are being listed. If it is omitted, all the activity log alerts under the current subscription are listed. """ helps['monitor activity-log alert create'] = """ type: command short-summary: Create a default activity log alert long-summary: This command will create a default activity log with one condition which compares if the activities logs 'category' field equals to 'ServiceHealth'. The newly created activity log alert does not have any action groups attached to it. parameters: - name: --name -n short-summary: Name of the activity log alerts - name: --scope -s short-summary: A list of string that will be used as prefixes. The alert will only apply to activityLogs with resourceIds that fall under one of these prefixes. If not provided, the path to this resource group will
#!/usr/bin/env python3

import sys, os, shutil, time, math, re, stat
from optparse import OptionParser
import datetime
import fileReadHelp
import json

# annotations are optional: degrade gracefully when the helper is missing
try:
    import annotate
except ImportError:
    sys.stdout.write('[Warning: "annotate" import failed, no annotations will be generated]\n')
    annotate = None

usage = '%prog [options] <graphfiles>'
parser = OptionParser(usage=usage)
parser.add_option('-g', '--graphlist', dest='graphlist',
                  help='file with list of graphfiles', metavar='FILE')
parser.add_option('-t', '--testdir', dest='testdir',
                  help='test directory', metavar='DIR', default='.')
parser.add_option('-o', '--outdir', dest='outdir',
                  help='output directory', metavar='DIR', default=None)
parser.add_option('-p', '--perfdir', dest='perfdir',
                  help='performance data directory', metavar='DIR', default=None)
parser.add_option('-n', '--name', dest='name',
                  help='Test platform name', metavar='NAME', default=None)
parser.add_option('-s', '--startdate', dest='startdate',
                  help='graph start date', metavar='DATE')
parser.add_option('-e', '--enddate', dest='enddate',
                  help='graph end date', metavar='DATE')
parser.add_option('-a', '--alttitle', dest='alttitle',
                  help='alternate/custom site title', metavar='NAME', default=None)
parser.add_option('-v', '--verbose', dest='verbose',
                  action='store_true', default=False)
parser.add_option('-d', '--debug', dest='debug',
                  action='store_true', default=False)
parser.add_option('-r', '--reduce', dest='g_reduce', type='choice',
                  metavar='STRATEGY', default='avg',
                  choices=['avg', 'med', 'min', 'max'])
parser.add_option('-x', '--no-bounds', dest='g_display_bounds',
                  action='store_false', default=True)
parser.add_option('-u', '--numericX', dest='numericX',
                  help='expect numbers (e.g. revisions), not dates,'
                       ' for the X axis',
                  action='store_true', default=False)
parser.add_option('-m', '--configs', dest='multiConf',
                  help='comma separated list of configurations. ":v" after a '
                       'configuration makes it visible by default. e.g '
                       '"local:v,--no-local:v" will create graphs with series '
                       'duplicated for local and --no-local both of which will '
                       'be visible by default on the web page.',
                  default='')
if annotate:
    parser.add_option('-j', '--annotate', dest='annotation_file', default=None)

# module-level state populated from the parsed options
debug = False
verbose = False
numericX = False
multiConf = []
defaultMultiConf = []


def try_parse_float(value):
    """Parse *value* as a float, ignoring any trailing unit suffix.

    Returns the original string unchanged when parsing fails.
    """
    try:
        # removes any trailing characters (units)
        return float(re.sub(r'[^\d,.]+$', '', value))
    except ValueError:
        return value


def parse_date(value, dateformat='%m/%d/%y'):
    # with numeric X values there is nothing to parse
    if numericX:
        return value
    return time.strptime(value.strip(), dateformat)


def show_date(value=time.localtime()):
    # numeric X values are displayed verbatim
    if numericX:
        return value
    return time.strftime('%Y-%m-%d', value)


# converts a csv file to json. Converts the csv file to json where the json
# object has two members: the labels and the actual data formatted as required
# by dygraphs
def csv_to_json(csvFile, ginfo):
    data = parse_csv(csvFile)
    os.unlink(csvFile)

    # rename the csv file to indicate that it's a json file
    jsonFile = os.path.splitext(csvFile)[0] + '.json'
    ginfo.datfname = os.path.splitext(ginfo.datfname)[0] + '.json'

    # each label is stored in a single element array because of how it is
    # parsed, get rid of that array so labels is now a simple array of strings
    labels = [a[0] for a in data[0]]
    data = data[1:]

    lines = []
    for line in data:
        # like for the labels, strip the array surrounding the date
        curDate = line[0][0]
        dataForCurDate = line[1:]
        curLine = [curDate]
        # the data for a series on the current date is stored as ['val'],
        # ['low', 'med', 'high'], or ['']. Need to parse as floats, and turn
        # [''] (no data for this series on this date) into None so json.dumps
        # turns it into null. If we're not using custom bars (displayrange is
        # false) then our val will come in as ['val'] but we just want to store
        # the single value, not the single value in an array
        for seriesArr in dataForCurDate:
            if len(seriesArr) == 1 and seriesArr[0] == '':
                curLine.append(None)
            elif ginfo.displayrange:
                curLine.append([try_parse_float(x) for x in seriesArr])
            else:
                curLine.append(try_parse_float(seriesArr[0]))
        lines.append(curLine)

    # if there was no data in the csvFile, create an empty entry for todays
    # date. Dygraphs does not accept a zero length array for the data so we add
    # a single entry for today's date with null data for each series
    if len(lines) == 0:
        line = [None for label in labels]
        line[0] = show_date()
        lines.append(line)

    jsonObj = {'labels': labels, 'data': lines}
    with open(jsonFile, 'w') as f:
        f.write(json.dumps(jsonObj))


# Helper functions to parse/write/sort a dygraphs compatible csv file.
#
# Expected form of csv file is:
#
# Date,<perfKey1>,<perfKey2>
# YYYY-mm-dd,<key1Value>,<key2Value>
# YYYY-mm-dd,<key1Value>,<key2Value>
#
# where <keyXValue> is of the form 'val' for numtrials=1 (customBars are not
# being used), 'lowVal;medVal;highVal' for numTrials>1 (customBars are being
# used), or '' if there was no no value for that key for that date
#
# Parses a csv file of the above form into a list of the form:
#
# [[['Date'], ['perfKey1'], ['perfKey2']],
#  [['YYYY-mm-dd'],[<key1Value>],[<key2Value>]],
#  [['YYYY-mm-dd'],[<key1Value>],[<key2Value>]]]
#
# where <keyXValue> is either a single value as a string, 3 values (low, med,
# high) as strings, or the empty string if there was no value for that key for
# that date
def parse_csv(csvFile):
    parsed = []
    for rawLine in fileReadHelp.ReadFileWithComments(csvFile):
        rawLine = rawLine.rstrip()
        if len(rawLine) > 0:
            # each comma-separated cell may itself hold ';'-separated values
            parsed.append([cell.split(';') for cell in rawLine.split(',')])
    return parsed


def data_to_csv(data, csvFile):
    # inverse of parse_csv: rejoin cells with ';' and rows with ','
    rows = []
    for values in data:
        rows.append(','.join(';'.join(value) for value in values) + '\n')
    with open(csvFile, 'w') as f:
        f.writelines(rows)


# sorts a csv of the aforementioned form. Sorts a series' keys and it's
# corresponding values (column) from greatest to least in terms of a series
# most recent data.
# Takes:
# Date,<perfKey1>,<perfKey2>
# YYYY-mm-dd,1;2;3,0;1;2
# YYYY-mm-dd,1;2;3,3;4;5
# and transforms it into:
# Date,<perfKey2>,<perfKey1>
# YYYY-mm-dd,0;1;2,1;2;3
# YYYY-mm-dd,3;4;5,1;2;3
#
# also works for 'val', instead of 'low;med;high' and empty values: ''
def sort_csv(csvFile):
    data = parse_csv(csvFile)
    if len(data) == 1:
        return

    # transpose the data so that we can sort by row
    data = list(zip(*data))
    # remove the Date perfkey and the actual dates as they screw up sorting
    dates = data.pop(0)

    # sort function called on values of the form:
    # [['perfKey1'],[<key1Value>],[<key1Value>]]
    # where keyXValue are of the form described above. sorts by the most recent
    # date, and grabs the middle value. returns -1 for empty values, so that
    # series with no recent data filter down to the bottom
    def parse_sortable_float(values):
        mostRecentValues = values[len(values) - 1]
        value = mostRecentValues[len(mostRecentValues) // 2]
        if value == '':
            return -1.0
        return try_parse_float(value)

    data.sort(key=lambda values: parse_sortable_float(values), reverse=True)

    # add the dates back in
    data.insert(0, dates)
    # untranspose the data
    data = list(zip(*data))
    data_to_csv(data, csvFile)


# Yield dateformat-ed dates in the range [start_date, end_date]
def date_range(start_date, end_date, dateformat='%Y-%m-%d'):
    cur_date = datetime.datetime.strptime(start_date, dateformat)
    end_date = datetime.datetime.strptime(end_date, dateformat)
    while cur_date <= end_date:
        yield cur_date.strftime(dateformat)
        cur_date += datetime.timedelta(days=1)


# Fill in missing dates in the csv file. Grabs the start and end date from the
# file, and ensures that we have an entry for every date in the range.
# We do this because annotations require an actual data point to attach to, so
# we make sure there will always be a day available
def fill_sparse_csv(csvFile):
    data = parse_csv(csvFile)
    keys = data.pop(0)
    if len(data) > 1:
        dates = list(zip(*data)).pop(0)
        dates = [date[0] for date in dates]
        if len(dates) > 1:
            start_date = dates[0]
            end_date = dates[-1]
            # for all the missing days append an empty entry to the end of the
            # data (we'll sort later to get things in the right order)
            missing_dates = set(date_range(start_date, end_date)) - set(dates)
            for date in missing_dates:
                # emptydate = [[date], [''], [''], ...]
                emptydate = [[date]] + [['']] * (len(keys) - 1)
                data.append(emptydate)
    # sort our data, we don't need to convert our date strings to datetimes
    # because for ISO 8601 dates lexical order is also chronological order
    data.sort()
    data.insert(0, keys)
    data_to_csv(data, csvFile)


# Strips all but the first 'numseries' series from a csv file. Useful for
# things like compiler performance testing where you want to display the top 10
# passes. If multiple configurations are being used it grabs the series from
# the default configuration and then finds the other configurations for those
# series.
def strip_series(csvFile, numseries):
    data = parse_csv(csvFile)
    labels = [a[0] for a in data[0]]
    newData = []
    data = list(zip(*data))
    numFound = 0
    newData.append(data[0])
    if multiConf:
        defaultConf = multiConf[0]
        for i in range(1, len(labels)):
            # keep up to 'numseries' default-configuration series, and pull in
            # the matching series for every other configuration
            if labels[i].endswith('(' + defaultConf + ')') and numFound < numseries:
                numFound += 1
                newData.append(data[i])
                for conf in multiConf[1:]:
                    confLabel = labels[i].replace('(' + defaultConf + ')',
                                                  '(' + conf + ')')
                    newData.append(data[labels.index(confLabel)])
    else:
        for i in range(1, numseries + 1):
            newData.append(data[i])
    # untranspose the data
    data = list(zip(*newData))
    data_to_csv(data, csvFile)


# Find the series to attach annotations to. If there were multiple
# configurations, attach to a series in the default (first listed)
# configuration.
Else attach to first series def get_annotation_series(csvFile): data = parse_csv(csvFile) labels = [a[0] for a in data[0]] labels = labels[1:] annotation_series = labels[0] if multiConf: defaultConf = multiConf[0] for label in labels: if label.endswith('(' + defaultConf + ')'): annotation_series = label break return annotation_series ############ class CouldNotReadGraphFile(Exception): pass # Global info about generating graphs class GraphStuff: def __init__(self, _name, _testdir, _perfdir, _outdir, _startdate, _enddate, _reduce, _display_bounds, _alttitle, _annotation_file): self.numGraphs = 0 self.config_name = _name self.testdir = _testdir self.perfdir = _perfdir self.outdir = _outdir self.datdir = self.outdir+'/'+'CSVfiles' self.title = 'Chapel Performance Graphs' if _alttitle:
= mod16(rst, t, RS); int r = div16(rs, magic_S, shift_S); int s = mod16(rs, r, S); int x = qs + s; int y = pr + r; int z = mt + t; int c = kj + j; bool bounds_x = x >= 0 && x < W; bool bounds_y = y >= 0 && y < H; bool bounds_z = z >= 0 && z < D; bool bounds_c = c >= 0 && c < C; bool in_bounds = bounds_x && bounds_y && bounds_z && bounds_c; int sliceI = c*DHWN + z*HWN + y*WN + x*N; int lut_offset = mad16(sb, JRST, jrst); lut[lut_offset] = in_bounds ? sliceI : -1; jrst += inc; } } __syncthreads(); int intermediate_max = 0; if (p < P && q < Q && n < N) { delta *= alpha; bool load_beta = beta != 0.0f; int jrst = 0; while (jrst < JRST) { int lut_offset = mad16(sb, JRST, jrst); int offset0 = lut[lut_offset + 0]; int offset1 = lut[lut_offset + 1]; int offset2 = lut[lut_offset + 2]; int offset3 = lut[lut_offset + 3]; // need to figure out how to write into output. Can't be float * if we write fp16 // load fp16 from O, so it's an fp16 pointer %(type)s* out0 = O + offset0; %(type)s* out1 = O + offset1; %(type)s* out2 = O + offset2; %(type)s* out3 = O + offset3; bool valid0 = jrst + 0 < JRST && offset0 >= 0; bool valid1 = jrst + 1 < JRST && offset1 >= 0; bool valid2 = jrst + 2 < JRST && offset2 >= 0; bool valid3 = jrst + 3 < JRST && offset3 >= 0; // load input dtype, convert to float32. float beta0 = valid0 && load_beta ? %(cvt)s(__ldg(out0)) * beta : 0.0f; float beta1 = valid1 && load_beta ? %(cvt)s(__ldg(out1)) * beta : 0.0f; float beta2 = valid2 && load_beta ? %(cvt)s(__ldg(out2)) * beta : 0.0f; float beta3 = valid3 && load_beta ? %(cvt)s(__ldg(out3)) * beta : 0.0f; // convert float32 back into input format to write out %(type)s temp_out0 = valid0 ? %(cvt_out)s(%(mul_by_scale)s(jrst + 0 == argmax ? delta + beta0 : beta0)) : 0.0f; %(type)s temp_out1 = valid1 ? %(cvt_out)s(%(mul_by_scale)s(jrst + 1 == argmax ? delta + beta1 : beta1)) : 0.0f; %(type)s temp_out2 = valid2 ? %(cvt_out)s(%(mul_by_scale)s(jrst + 2 == argmax ? 
delta + beta2 : beta2)) : 0.0f; %(type)s temp_out3 = valid3 ? %(cvt_out)s(%(mul_by_scale)s(jrst + 3 == argmax ? delta + beta3 : beta3)) : 0.0f; // predicate writes with no-op flag. if (!(flags & 1)) { if (valid0) *out0 = temp_out0; if (valid1) *out1 = temp_out1; if (valid2) *out2 = temp_out2; if (valid3) *out3 = temp_out3; } intermediate_max = max_abs(intermediate_max, temp_out0); intermediate_max = max_abs(intermediate_max, temp_out1); intermediate_max = max_abs(intermediate_max, temp_out2); intermediate_max = max_abs(intermediate_max, temp_out3); jrst += 4; } } intermediate_max += 0; %(atomic_max)s } """ template_vals = prepare_template_vals(clss, compute_capability) code = code % template_vals module = SourceModule(code) kernel = module.get_function("spool_bprop_max") # f = open("spool_bprop_max.cu", "w") # print >>f, code # f.close() kernel.prepare("3P 2f 44I" + ("Pf" if (clss[0] == "x") else "")) return kernel @context_dependent_memoize def _get_bprop_avg(clss, compute_capability): code = r""" %(common)s __global__ void spool_bprop_avg( const %(type)s* I, %(type)s* O, const unsigned char* A, float alpha, float beta, int flags, int N, int W, int H, int D, int C, int WN, int HWN, int DHWN, int P, int Q, int magic_P, int shift_P, int QN, int PQN, int MPQN, int pad_c, int pad_d, int pad_h, int pad_w, int str_c, int str_d, int str_h, int str_w, int S, int RS, int RST, int JRST, int magic_S, int shift_S, int magic_RS, int shift_RS, int magic_RST, int shift_RST, int supP, int supQ, int shlP, int maskP, int shrP, int shlQ, int maskQ, int shrQ, int maskN, int shrN %(stats_args)s ) { __shared__ float rcpWindowSize[32]; extern __shared__ int lut[]; int tid = threadIdx.x; int q = blockIdx.x; int mp = blockIdx.y; int k = blockIdx.z; int m = mp * magic_P; m >>= shift_P; int p = mp - m*supP; // zigzag q back and forth to improve L2 cache perf if (p & 1) q = supQ - q - 1; // Superblock P and Q p = (p << shlP) + ((tid & maskP) >> shrP); q = (q << shlQ) + ((tid & maskQ) >> 
shrQ); int n = tid & maskN; int sb = tid >> shrN; O += n; I += k*MPQN + m*PQN + p*QN + mad16(q, N, n); float delta = 0.0f; if (p < P && q < Q && n < N) delta = %(cvt)s(__ldg(I)); if (tid < 32) { int kj = k * str_c - pad_c; int mt = m * str_d - pad_d; int pr = p * str_h - pad_h; int qs = q * str_w - pad_w; int inc = min(maskN + 1, 32); int sbBits = 1 << min(shrN, 5); int sbMask = ~(-1 << sbBits) << mad16(sb, sbBits, 0); int window_size = 0; int jrst = n; while (jrst < JRST) { int j = div16(jrst, magic_RST, shift_RST); int rst = mod16(jrst, j, RST); int t = div16(rst, magic_RS, shift_RS); int rs = mod16(rst, t, RS); int r = div16(rs, magic_S, shift_S); int s = mod16(rs, r, S); int x = qs + s; int y = pr + r; int z = mt + t; int c = kj + j; bool bounds_x = x >= 0 && x < W; bool bounds_y = y >= 0 && y < H; bool bounds_z = z >= 0 && z < D; bool bounds_c = c >= 0 && c < C; bool in_bounds = bounds_x && bounds_y && bounds_z && bounds_c; // Count the total valid slices window_size += __popc(sbMask & __ballot(in_bounds)); int sliceI = c*DHWN + z*HWN + y*WN + x*N; int lut_offset = mad16(sb, JRST, jrst); lut[lut_offset] = in_bounds ? sliceI : -1; jrst += inc; } // TODO confirm kepler OK unsigned int shrN_mask = (shrN < 32) ? 
max(1, ((1 << shrN) - 1)) : 0xffffffff; if((tid & shrN_mask) == 0) rcpWindowSize[sb] = 1.0f / (float)window_size; } __syncthreads(); int intermediate_max = 0; if (p < P && q < Q && n < N) { delta *= alpha * rcpWindowSize[sb]; bool load_beta = beta != 0.0f; int jrst = 0; while (jrst < JRST) { int lut_offset = mad16(sb, JRST, jrst); int offset0 = lut[lut_offset + 0]; int offset1 = lut[lut_offset + 1]; int offset2 = lut[lut_offset + 2]; int offset3 = lut[lut_offset + 3]; %(type)s* out0 = O + offset0; %(type)s* out1 = O + offset1; %(type)s* out2 = O + offset2; %(type)s* out3 = O + offset3; bool valid0 = jrst + 0 < JRST && offset0 >= 0; bool valid1 = jrst + 1 < JRST && offset1 >= 0; bool valid2 = jrst + 2 < JRST && offset2 >= 0; bool valid3 = jrst + 3 < JRST && offset3 >= 0; float beta0 = valid0 && load_beta ? %(cvt)s(__ldg(out0)) * beta : 0.0f; float beta1 = valid1 && load_beta ? %(cvt)s(__ldg(out1)) * beta : 0.0f; float beta2 = valid2 && load_beta ? %(cvt)s(__ldg(out2)) * beta : 0.0f; float beta3 = valid3 && load_beta ? %(cvt)s(__ldg(out3)) * beta : 0.0f; %(type)s temp_out0 = valid0 ? %(cvt_out)s(%(mul_by_scale)s(delta + beta0)) : 0.0f; %(type)s temp_out1 = valid1 ? %(cvt_out)s(%(mul_by_scale)s(delta + beta1)) : 0.0f; %(type)s temp_out2 = valid2 ? %(cvt_out)s(%(mul_by_scale)s(delta + beta2)) : 0.0f; %(type)s temp_out3 = valid3 ? %(cvt_out)s(%(mul_by_scale)s(delta + beta3)) : 0.0f; // predicate writes with no-op flag. if (!(flags & 1)) { if (valid0) *out0 = temp_out0; if (valid1) *out1 = temp_out1; if (valid2) *out2 = temp_out2; if (valid3) *out3 = temp_out3; } intermediate_max = max_abs(intermediate_max, temp_out0); intermediate_max = max_abs(intermediate_max,
""" Functions for geometrical image transformation and warping. """ import sys import time import numbers from functools import wraps from collections import namedtuple import numpy as np import scipy.ndimage from scipy.signal import convolve2d from sunpy import log from sunpy.util.exceptions import warn_deprecated, warn_user __all__ = ['add_rotation_function', 'affine_transform'] def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None, recenter=False, missing=np.nan, use_scipy=None, *, method='scipy', clip=True): """ Rotates, shifts and scales an image. This function supports NaN values in the input image and supports using NaN for pixels in the output image that are beyond the extent of the input image. Handling NaN values in the input image requires additional computation time. Parameters ---------- image : `numpy.ndarray` 2D image to be rotated. rmatrix : `numpy.ndarray` that is 2x2 Linear transformation rotation matrix. order : `int` 0-5, optional Interpolation order to be used, defaults to 3. The precise meaning depends on the rotation method specifed by ``method``. scale : `float` A scale factor for the image with the default being no scaling. image_center : tuple, optional The point in the image to rotate around (axis of rotation). Defaults to the center of the array. recenter : `bool` or array-like, optional Move the axis of rotation to the center of the array or recenter coords. Defaults to `False`. missing : `float`, optional The value to use for pixels in the output image that are beyond the extent of the input image. Defaults to `numpy.nan`. method : {{{rotation_function_names}}}, optional Rotation function to use. Defaults to ``'scipy'``. clip : `bool`, optional If `True`, clips the pixel values of the output image to the range of the input image (including the value of ``missing``, if used). Defaults to `True`. Returns ------- `numpy.ndarray`: New rotated, scaled and translated image. 
Notes ----- For each NaN pixel in the input image, one or more pixels in the output image will be set to NaN, with the size of the pixel region affected depending on the interpolation order. All currently implemented rotation methods require a convolution step to handle image NaNs. This convolution normally uses :func:`scipy.signal.convolve2d`, but if `OpenCV <https://opencv.org>`__ is installed, the faster |cv2_filter2D|_ is used instead. See :func:`~sunpy.image.transform.add_rotation_function` for how to add a different rotation function. """ rmatrix = rmatrix / scale array_center = (np.array(image.shape)[::-1] - 1) / 2.0 # Make sure the image center is an array and is where it's supposed to be if image_center is not None: image_center = np.asanyarray(image_center) else: image_center = array_center # Determine center of rotation based on use (or not) of the recenter keyword if recenter: rot_center = array_center else: rot_center = image_center displacement = np.dot(rmatrix, rot_center) shift = image_center - displacement # While `use_scipy` is still supported, we have to check which method to actually use method = _get_transform_method(method, use_scipy) # Transform the image using the appropriate function rotated_image = _rotation_registry[method].function(image, rmatrix, shift, order, missing, clip) return rotated_image def _get_transform_method(method, use_scipy): # This is re-used in affine_transform and GenericMap.rotate if method not in _rotation_registry: raise ValueError(f'Method {method} not in supported methods: ' f'{_rotation_registry.keys()}') if use_scipy is not None: warn_deprecated("The 'use_scipy' argument is deprecated. 
" "Specify the rotation method to the 'method' " "keyword argument instead.") if use_scipy is True and method != 'scipy': warn_user(f"Using scipy instead of {method} for rotation.") method = 'scipy' if method == 'scikit-image': try: import skimage # NoQA except ImportError: raise ImportError("scikit-image must be installed to be usable for rotation.") return method def add_rotation_function(name, *, allowed_orders, handles_clipping, handles_image_nans, handles_nan_missing): """ Decorator to add a rotation function to the registry of selectable implementations. Each registered rotation function becomes a selectable option for :func:`sunpy.image.transform.affine_transform` and :meth:`sunpy.map.GenericMap.rotate`. Those two routines are required to handle clipping the output image, NaNs in the input image, and NaN as the value to use for pixels in the output image that are beyond the extent of the input image. If the supplied rotation function cannot provide one or more of these capabilities, the decorator is able to provide them instead. The decorator requires the parameters listed under ``Parameters``. The decorated rotation function must accept the parameters listed under ``Other Parameters`` in that order and return the rotated image. Parameters ---------- name : `str` The name that will be used to select the rotation function allowed_orders : `set` The allowed values for the ``order`` parameter. handles_clipping : `bool` Specifies whether the rotation function will internally perform clipping. If ``False``, the rotation function will always receive ``False`` for the ``clip`` input parameter. handles_image_nans : `bool` Specifies whether the rotation function will internally handle NaNs in the input image. If ``False``, the rotation function is guaranteed to be provided an image without any NaNs. handles_nan_missing : `bool` Specifies whether the rotation function will internally handle NaN as the ``missing`` value. 
If ``False``, the rotation function will never receive NaN, but instead receive a value in the input range of the image. Other Parameters ---------------- image : `numpy.ndarray` The image, which could be integers or floats matrix : `numpy.ndarray` that is 2x2 The linear transformation matrix (e.g., rotation+scale+skew) shift : 2-element `numpy.ndarray` The translational shift to apply to the image in each axis order : `int` The numerical parameter that controls the degree of interpolation missing : `float` The value to use for outside the bounds of the original image clip : `bool` Whether to clip the output image to the range of the input image Notes ----- The docstring of the rotation function should be a bulleted list of notes specific to the rotation function. It will be appended to ``Notes`` section of the docstring for :func:`~sunpy.image.transform.affine_transform`. The rotation function is supplied the input image directly, so the function should not modify the image in place. Setting any of the ``handles_*`` parameters to ``False`` means that computation will be performed to modify the image returned by the rotation function before it is returned to :func:`~sunpy.image.transform.affine_transform`. If the decorator is handling image NaNs on behalf of the rotation function (i.e., ``handles_image_nans=False``), pixels in the output image will be set to NaN if they are within a certain neighborhood size that depends on the ``order`` parameter. This step requires an additional image convolution, which might be avoidable if the rotation function were able to internally handle image NaNs. This convolution normally uses :func:`scipy.signal.convolve2d`, but if `OpenCV <https://opencv.org>`__ is installed, the faster |cv2_filter2D|_ is used instead. 
""" def decorator(rotation_function): @wraps(rotation_function) def wrapper(image, matrix, shift, order, missing, clip): if order not in allowed_orders: raise ValueError(f"{order} is one of the allowed orders for method '{name}': " f"{set(allowed_orders)}") clip_to_use = clip if handles_clipping else False # Check if missing is NaN and needs to be externally handled needs_missing_handling = not handles_nan_missing and np.isnan(missing) # Check if there are any NaNs in the image that need to be externally handled if not handles_image_nans: isnan = np.isnan(image) needs_nan_handling = np.any(isnan) else: needs_nan_handling = False # If either is needed, change the NaNs to the median of the input image if needs_missing_handling or needs_nan_handling: substitute = np.nanmedian(image) missing_to_use = substitute if needs_missing_handling else missing image_to_use = np.nan_to_num(image, nan=substitute) if needs_nan_handling else image t = time.perf_counter() rotated_image = rotation_function(image_to_use, matrix, shift, order, missing_to_use, clip_to_use) log.debug(f"{name} rotating image: {time.perf_counter() - t:.3f} s") # If needed, restore the NaNs if needs_nan_handling: # Use a convolution to find all pixels that are appreciably affected by NaNs # The kernel size depends on the interpolation order, but are empirically defined # because a given pixel can affect every other pixel under spline interpolation sizes = [1, 1, 5, 5, 7, 7] t = time.perf_counter() try: # If OpenCV is installed, its convolution function is much faster import cv2 expanded_nans = cv2.filter2D(isnan.astype(float), -1, np.ones((sizes[order], sizes[order])), borderType=cv2.BORDER_CONSTANT) except ImportError: expanded_nans = convolve2d(isnan.astype(float), np.ones((sizes[order], sizes[order])), mode='same') log.debug(f"{name} expanding image NaNs: {time.perf_counter() - t:.3f} s") t = time.perf_counter() rotated_nans = rotation_function(expanded_nans, matrix, shift, order=min(order, 1), missing=0, 
clip=False) rotated_image[rotated_nans > 0] = np.nan log.debug(f"{name} rotating image NaNs: {time.perf_counter()
ShotgunError("Unable to share thumbnail: %s" % result) return attachment_id def upload_thumbnail(self, entity_type, entity_id, path, **kwargs): """ Upload a file from a local path and assign it as the thumbnail for the specified entity. .. note:: Images will automatically be re-sized on the server to generate a size-appropriate image file. However, the original file is retained as well and is accessible when you click on the thumbnail image in the web UI. If you are using a local install of Shotgun and have not enabled S3, this can eat up disk space if you're uploading really large source images for your thumbnails. You can un-set (aka clear) a thumbnail on an entity using the :meth:`~shotgun_api3.Shotgun.update` method and setting the **image** field to ``None``. This will also unset the ``filmstrip_thumbnail`` field if it is set. Supported image file types include ``.jpg` and ``.png`` (preferred) but will also accept. ``.gif```, ``.tif``, ``.tiff``, ``.bmp``, ``.exr``, ``.dpx``, and ``.tga``. This method wraps over :meth:`~shotgun_api3.Shotgun.upload`. Additional keyword arguments passed to this method will be forwarded to the :meth:`~shotgun_api3.Shotgun.upload` method. :param str entity_type: Entity type to set the thumbnail for. :param int entity_id: Id of the entity to set the thumbnail for. :param str path: Full path to the thumbnail file on disk. :returns: Id of the new attachment """ return self.upload(entity_type, entity_id, path, field_name="thumb_image", **kwargs) def upload_filmstrip_thumbnail(self, entity_type, entity_id, path, **kwargs): """ Upload filmstrip thumbnail to specified entity. .. versionadded:: 3.0.9 Requires Shotgun server v3.1.0+ Uploads a file from a local directory and assigns it as the filmstrip thumbnail for the specified entity. The image must be a horizontal strip of any number of frames that are exactly 240 pixels wide. Therefore the whole strip must be an exact multiple of 240 pixels in width. 
The height can be anything (and will depend on the aspect ratio of the frames). Any image file type that works for thumbnails will work for filmstrip thumbnails. Filmstrip thumbnails will only be visible in the Thumbnail field on an entity if a regular thumbnail image is also uploaded to the entity. The standard thumbnail is displayed by default as the poster frame. Then, on hover, the filmstrip thumbnail is displayed and updated based on your horizontal cursor position for scrubbing. On mouseout, the default thumbnail is displayed again as the poster frame. The url for a filmstrip thumbnail on an entity is available by querying for the ``filmstrip_image field``. You can un-set (aka clear) a thumbnail on an entity using the :meth:`~shotgun_api3.Shotgun.update` method and setting the **image** field to ``None``. This will also unset the ``filmstrip_thumbnail`` field if it is set. This method wraps over :meth:`~shotgun_api3.Shotgun.upload`. Additional keyword arguments passed to this method will be forwarded to the :meth:`~shotgun_api3.Shotgun.upload` method. >>> filmstrip_thumbnail = '/data/show/ne2/100_110/anim/01.mlk-02b_filmstrip.jpg' >>> sg.upload_filmstrip_thumbnail("Version", 27, filmstrip_thumbnail) 87 :param str entity_type: Entity type to set the filmstrip thumbnail for. :param int entity_id: Id of the entity to set the filmstrip thumbnail for. :param str path: Full path to the filmstrip thumbnail file on disk. :returns: Id of the new Attachment entity created for the filmstrip thumbnail :rtype: int """ if not self.server_caps.version or self.server_caps.version < (3, 1, 0): raise ShotgunError("Filmstrip thumbnail support requires server version 3.1 or " "higher, server is %s" % (self.server_caps.version,)) return self.upload(entity_type, entity_id, path, field_name="filmstrip_thumb_image", **kwargs) def upload(self, entity_type, entity_id, path, field_name=None, display_name=None, tag_list=None): """ Upload a file to the specified entity. 
Creates an Attachment entity for the file in Shotgun and links it to the specified entity. You can optionally store the file in a field on the entity, change the display name, and assign tags to the Attachment. .. note:: Make sure to have retries for file uploads. Failures when uploading will occasionally happen. When it does, immediately retrying to upload usually works >>> mov_file = '/data/show/ne2/100_110/anim/01.mlk-02b.mov' >>> sg.upload("Shot", 423, mov_file, field_name="sg_latest_quicktime", ... display_name="Latest QT") 72 :param str entity_type: Entity type to link the upload to. :param int entity_id: Id of the entity to link the upload to. :param str path: Full path to an existing non-empty file on disk to upload. :param str field_name: The internal Shotgun field name on the entity to store the file in. This field must be a File/Link field type. :param str display_name: The display name to use for the file. Defaults to the file name. :param str tag_list: comma-separated string of tags to assign to the file. :returns: Id of the Attachment entity that was created for the image. :rtype: int :raises: :class:`ShotgunError` on upload failure. """ # Basic validations of the file to upload. path = os.path.abspath(os.path.expanduser(path or "")) # We need to check for string encodings that we aren't going to be able # to support later in the upload process. If the given path wasn't already # unicode, we will try to decode it as utf-8, and if that fails then we # have to raise a sane exception. This will always work for ascii and utf-8 # encoded strings, but will fail on some others if the string includes non # ascii characters. if not isinstance(path, six.text_type): try: path = path.decode("utf-8") except UnicodeDecodeError: raise ShotgunError( "Could not upload the given file path. It is encoded as " "something other than utf-8 or ascii. 
To upload this file, " "it can be string encoded as utf-8, or given as unicode: %s" % path ) if not os.path.isfile(path): raise ShotgunError("Path must be a valid file, got '%s'" % path) if os.path.getsize(path) == 0: raise ShotgunError("Path cannot be an empty file: '%s'" % path) is_thumbnail = (field_name in ["thumb_image", "filmstrip_thumb_image", "image", "filmstrip_image"]) # Supported types can be directly uploaded to Cloud storage if self._requires_direct_s3_upload(entity_type, field_name): return self._upload_to_storage(entity_type, entity_id, path, field_name, display_name, tag_list, is_thumbnail) else: return self._upload_to_sg(entity_type, entity_id, path, field_name, display_name, tag_list, is_thumbnail) def _upload_to_storage(self, entity_type, entity_id, path, field_name, display_name, tag_list, is_thumbnail): """ Internal function to upload a file to the Cloud storage and link it to the specified entity. :param str entity_type: Entity type to link the upload to. :param int entity_id: Id of the entity to link the upload to. :param str path: Full path to an existing non-empty file on disk to upload. :param str field_name: The internal Shotgun field name on the entity to store the file in. This field must be a File/Link field type. :param str display_name: The display name to use for the file. Defaults to the file name. :param str tag_list: comma-separated string of tags to assign to the file. :param bool is_thumbnail: indicates if the attachment is a thumbnail. :returns: Id of the Attachment entity that was created for the image. 
:rtype: int """ filename = os.path.basename(path) # Step 1: get the upload url is_multipart_upload = (os.path.getsize(path) > self._MULTIPART_UPLOAD_CHUNK_SIZE) upload_info = self._get_attachment_upload_info(is_thumbnail, filename, is_multipart_upload) # Step 2: upload the file # We upload large files in multiple parts because it is more robust # (and required when using S3 storage) if is_multipart_upload: self._multipart_upload_file_to_storage(path, upload_info) else: self._upload_file_to_storage(path, upload_info["upload_url"]) # Step 3: create the attachment url = urllib.parse.urlunparse((self.config.scheme, self.config.server, "/upload/api_link_file", None, None, None)) params = { "entity_type": entity_type, "entity_id": entity_id, "upload_link_info": upload_info["upload_info"] } params.update(self._auth_params()) if is_thumbnail: if field_name == "filmstrip_thumb_image" or field_name == "filmstrip_image": params["filmstrip"] = True else: if display_name is None: display_name = filename # we allow linking to nothing for generic reference use cases if field_name is not None: params["field_name"] = field_name params["display_name"] = display_name # None gets converted to a string and added as a tag... if tag_list: params["tag_list"] = tag_list result = self._send_form(url, params) if not result.startswith("1"): raise ShotgunError("Could not upload file successfully, but " "not sure why.\nPath: %s\nUrl: %s\nError: %s" % (path, url, result)) LOG.debug("Attachment linked to content on Cloud storage") attachment_id = int(result.split(":", 2)[1].split("\n", 1)[0]) return attachment_id def _upload_to_sg(self, entity_type, entity_id, path, field_name, display_name, tag_list, is_thumbnail): """ Internal function to upload a
<filename>tuplex/python/tuplex/context.py
#!/usr/bin/env python3
#----------------------------------------------------------------------------------------------------------------------#
#                                                                                                                      #
#                                       Tuplex: Blazing Fast Python Data Science                                       #
#                                                                                                                      #
#                                                                                                                      #
#  (c) 2017 - 2021, Tuplex team                                                                                        #
#  Created by <NAME> first on 1/1/2021                                                                                 #
#  License: Apache 2.0                                                                                                 #
#----------------------------------------------------------------------------------------------------------------------#

import logging

from .libexec.tuplex import _Context, _DataSet
from .dataset import DataSet

import os
import glob
import sys
import cloudpickle

from tuplex.utils.common import flatten_dict, load_conf_yaml, stringify_dict, unflatten_dict, save_conf_yaml, in_jupyter_notebook, in_google_colab, is_in_interactive_mode, current_user, host_name
import uuid
import json

from .metrics import Metrics


class Context:
    """Main entry point for all operations with the Tuplex big data framework.

    Wraps the native ``_Context`` backend object and exposes data-source
    methods (``parallelize``, ``csv``, ...) that return :class:`DataSet`
    objects for further ETL operations.
    """

    def __init__(self, conf=None, name="", **kwargs):
        r"""creates new Context object, the main entry point for all operations with the Tuplex big data framework

        Args:
            conf (str) or (dict): Can be either the path to a YAML configuration file that is used to configure this \
                                  particular Tuplex context or a dictionary with Tuplex configuration options. \
                                  For keys and their meaning see below the list of Keyword Arguments.
            name (str): An optional name can be given to the context object. When given an empty string, \
                        Tuplex will choose a random name.
            **kwargs: Arbitrary keyword arguments, confer Keyword Arguments section for more information.

        Keyword Arguments:
            executorMemory (str) or (int): Specify how much memory each executor should use. If given as int, will be \
                                           interpreted as number of bytes. Else, one can also specify a memory amount \
                                           in string syntax, e.g. '1G' for 1GB of memory.
            executorCount (int): Number of executors (threads) to use. Defaults to ``std::thread::hardware_concurrency()``
            driverMemory (str) or (int): ``executorMemory`` for the driver
            partitionSize (str) or (int): ``executorMemory`` will be divided in blocks of size ``partitionSize``. This also \
                                          corresponds more or less 1:1 to the task size and is thus a parameter to tune \
                                          parallelism.
            runTimeMemory (str) or (int): Each executor allocates besides the ``executorMemory`` a memory region that is used \
                                          to store temporary objects when processing a single tuple. E.g. for string copy operations \
                                          arrays etc. This key allows to set memory via a memory string or as integer in bytes.
            runTimeMemoryBlockSize (str) or (int): Size of blocks used to allocate ``runTimeMemory``
            useLLVMOptimizer (str) or (bool): Specify whether LLVM Optimizers should be applied to generated LLVM IR or not.
            autoUpcast (str) or (bool): When transferring data to python, e.g. ``[1, 3.0, 4.0]`` the inferred type will be \
                                        ``float``. When this parameter is set to ``True``, ``1`` will be automatically cast to \
                                        ``float`` and no exception be raised. In case of the parameter being ``False``, tuple \
                                        with data ``1`` will raise a ``ValueError``.
            allowUndefinedBehavior: (str) or (bool): When set to true, certain errors won't be raised, e.g. division by zero \
                                                     will be ignored. This allows for better speed.
            scratchDir (str): Tuplex allows to process larger than memory datasets. If the main memory budget is exceeded, \
                              executors will cache files at `scratchDir`.
            logDir (str): Tuplex produces a log file `log.txt` per default. Specify with `logDir` where to store it.
            historyDir (str): Tuplex stores the database and logs within this dir when the webui is enabled.
            normalcaseThreshold (float): used to detect the normal case
            webui (bool): whether to use the WebUI interface. By default true.
            webui.url (str): URL where to connect to for history server. Default: localhost
            webui.port (str): port to use when connecting to history server. Default: 6543
            webui.mongodb.url (str): URL where to connect to MongoDB storage. If empty string, Tuplex will start and exit \
                                     a local mongodb instance.
            webui.mongodb.port (int): port for MongoDB instance
            webui.mongodb.path (str): local path where to store files for MongoDB instance to be started.
            webui.exceptionDisplayLimit (int): How many exceptions to display in UI max, must be at least 1.
            csv.maxDetectionRows (int): maximum number of rows to determine types for CSV files.
            csv.maxDetectionMemory (str) or (int): maximum number of bytes to use when performing type detection, \
                                                   separator inference, etc. over CSV files.
            csv.separators (list): list of single character strings that are viable separators when autodetecting. \
                                   E.g. ``[','. ';', '\t']``.
            csv.quotechar (str): single character denoting the character that is used as quote char according to \
                                 RFC-4180 standard. E.g. ``'"'``
            csv.comments (str): list of single character string which indicate start of a comment line, e.g. ``['#', '~']``
            csv.generateParser (str) or (bool): Whether to use C++ parser or a LLVM code generated parser
            csv.selectionPushdown (str) or (bool): When enabled, then the physical planner will generate a parser that \
                                                   only serializes data that is required within the pipeline.
        """
        # Locate the native runtime shared library shipped next to this module;
        # the glob matches any platform-specific extension (.so/.dylib/...).
        runtime_path = os.path.join(os.path.dirname(__file__), 'libexec', 'tuplex_runtime')
        paths = glob.glob(runtime_path + '*')

        # Exactly one runtime library must be found; anything else is a broken
        # or ambiguous installation and we abort the process.
        if len(paths) != 1:
            if len(paths) == 0:
                logging.error("found no tuplex runtime (tuplex_runtime.so). Faulty installation?")
            else:
                logging.error('found following candidates for tuplex runtime:\n{}, please specify which to use.'.format(paths))
            sys.exit(1)

        # pass configuration options
        # (1) check if conf is a dictionary or a string
        options = dict()
        if conf:
            if isinstance(conf, str):
                # need to load yaml file
                options = flatten_dict(load_conf_yaml(conf))
            elif isinstance(conf, dict):
                # update dict with conf
                options.update(flatten_dict(conf))

        # (2) update options with kwargs
        # kwargs take precedence over values coming from `conf`.
        options.update(kwargs)

        # (3) stringify to get to backend via boost python
        options = stringify_dict(options)

        user = current_user()
        # Auto-generate a short unique context name when none was supplied.
        name = name if len(name) > 0 else 'context' + str(uuid.uuid4())[:8]
        mode = 'file'
        # Detect the execution environment for telemetry/env options; later
        # checks override earlier ones (colab implies notebook implies shell).
        if is_in_interactive_mode():
            mode = 'shell'
        if in_jupyter_notebook():
            mode = 'jupyter'
        if in_google_colab():
            mode = 'colab'
        host = host_name()

        # pass above options as env.user, ...
        # also pass runtime path like that
        options['tuplex.env.user'] = str(user)
        options['tuplex.env.hostname'] = str(host)
        options['tuplex.env.mode'] = str(mode)

        # update runtime path according to user
        if 'tuplex.runTimeLibrary' in options:
            runtime_path = options['tuplex.runTimeLibrary']

        # @Todo: autostart mongodb & history server if they are not running yet...

        # last arg are the options as json string serialized b.c. of boost python problems
        self._context = _Context(name, runtime_path, json.dumps(options))
        pyth_metrics = self._context.getMetrics()
        assert pyth_metrics
        self.metrics = Metrics(pyth_metrics)
        assert self.metrics

    def parallelize(self, value_list, columns=None, schema=None):
        """ passes data to the Tuplex framework. Must be a list of primitive objects (e.g. of type bool, int, float, str) or
        a list of (nested) tuples of these types.

        Args:
            value_list (list): a list of objects to pass to the Tuplex backend.
            columns (list): a list of strings or None to pass to the Tuplex backend in order to name the columns.
                            Allows for dict access in functions then.
            schema: a schema defined as tuple of typing types. If None, then most likely schema will be inferred.

        Returns:
            Tuplex.dataset.DataSet: A Tuplex Dataset object that allows further ETL operations
        """
        assert isinstance(value_list, list), "data must be given as a list of objects"

        # Derive default column names ("column0", "column1", ...) when none
        # were given; the width is taken from the first element.
        cols = []
        if not columns:
            if len(value_list) > 0:
                num_cols = 1
                if isinstance(value_list[0], (list, tuple)):
                    num_cols = len(value_list[0])
                cols = ['column{}'.format(i) for i in range(num_cols)]
        else:
            cols = columns

        for col in cols:
            assert isinstance(col, str), 'element {} must be a string'.format(col)

        # NOTE(review): the validated `cols` list is only used for the checks
        # above; the raw `columns` argument is what gets forwarded to the
        # backend — confirm this is intentional.
        ds = DataSet()
        ds._dataSet = self._context.parallelize(value_list, columns, schema)
        return ds

    # NOTE(review): mutable default arguments (null_values=[''], type_hints={})
    # are shared across calls — safe only as long as they are never mutated;
    # consider None-sentinels.
    def csv(self, pattern, columns=None, header=None, delimiter=None, quotechar='"', null_values=[''], type_hints={}):
        """ reads csv (comma separated values) files. This function may either be provided with parameters that help
        to determine the delimiter, whether a header present or what kind of quote char is used. Overall, CSV parsing
        is done according to the RFC-4180 standard (cf. https://tools.ietf.org/html/rfc4180)

        Args:
            pattern (str): a file glob pattern, e.g. /data/file.csv or /data/\*.csv or /\*/\*csv
            columns (list): optional list of columns, will be used as header for the CSV file. If header is True, the
                            first line will be automatically checked against the column names. If header is None, then
                            it will be inferred whether a header is present and a check against the columns performed.
            header (bool): optional argument, if set to None Tuplex will automatically infer whether a header is present
                           or not.
            delimiter (str): optional argument, if set Tuplex will use this as delimiter. If set to None, Tuplex will
                             automatically infer the delimiter.
            quotechar (str): defines quoting according to RFC-4180.
            null_values (list): list of strings to be identified as null value, i.e. they will be parsed as None
            type_hints (dict): dictionary of hints for column types. Columns can be index either using
        """
        # NOTE(review): source truncated here — the remainder of the docstring
        # and the body of csv() are not visible in this chunk.
<gh_stars>1-10
# Functional tests for the eduid_signup application: e-mail signup,
# captcha handling, and social-network (Google) signup flows.
import time

# NOTE(review): `requests` appears unused directly in this chunk; patches
# below target 'requests.post'/'requests.get' by string — confirm before
# removing.
import requests
from mock import patch

from pyramid_sna.compat import urlparse

from eduid_signup.testing import FunctionalTests

import pprint
import logging
logger = logging.getLogger(__name__)


# Fixture user expected to already exist in the central user database.
EXISTING_USER = {
    'id': '789',
    'name': '<NAME>',
    'given_name': 'John',
    'family_name': 'Smith',
    'email': '<EMAIL>',
}

# Fixture user not yet present in any database.
NEW_USER = {
    'id': '789',
    'name': '<NAME>',
    'given_name': 'John',
    'family_name': 'Brown',
    'email': '<EMAIL>',
}


class HomeViewTests(FunctionalTests):
    """Tests for the landing page and the e-mail signup form."""

    # NOTE(review): this method is shadowed by the second `test_home`
    # definition below — only the later one is collected/run. Rename one of
    # them to restore coverage of these assertions.
    def test_home(self):
        res = self.testapp.get('/')
        self.assertEqual(res.status, '200 OK')
        res.mustcontain(
            'Welcome to eduID!',
            'Create an account for use with Swedish Universities.',
            'Sign up with your email',
            'Sign up with Facebook',
            'Sign up with Google',
        )

    def test_home(self):
        res = self.testapp.get('/')
        self.assertEqual(res.status, '200 OK')
        res.mustcontain('Welcome to eduID')

    def test_sign_up_with_bad_email(self):
        res = self.testapp.post('/', {'email': 'a@com'})
        self.assertEqual(res.status, '200 OK')
        res.mustcontain('Email is not valid')

    def test_sign_up_with_good_email(self):
        # A valid e-mail redirects to the captcha step.
        res = self.testapp.post('/', {'email': '<EMAIL>'})
        self.assertEqual(res.status, '302 Found')
        self.assertEqual(res.location, 'http://localhost/trycaptcha/')


class SuccessViewTests(FunctionalTests):
    """Tests for the post-signup success page and verification resend."""

    def test_success(self):
        self.add_to_session({'email': '<EMAIL>'})
        res = self.testapp.get('/success/')
        self.assertEqual(res.status, '200 OK')

    def test_success_no_email(self):
        # Without an e-mail in the session the user is bounced to the start.
        res = self.testapp.get('/success/')
        self.assertEqual(res.status, '302 Found')
        self.assertEqual(res.location, 'http://localhost/')

    def test_favicon(self):
        res = self.testapp.get('/favicon.ico')
        self.assertEqual(res.status, '302 Found')

    def test_resend_verification(self):
        self.add_to_session({'email': '<EMAIL>'})
        res = self.testapp.post('/resend_email_verification/')
        self.assertEqual(res.status, '302 Found')
        self.assertEqual(res.location, 'http://localhost/success/')

    def test_resend_verification_get(self):
        self.add_to_session({'email': '<EMAIL>'})
        res = self.testapp.get('/resend_email_verification/')
        self.assertEqual(res.status, '200 OK')


class HelpViewTests(FunctionalTests):
    """Tests language negotiation on the help/FAQ page."""

    def test_default_language(self):
        res = self.testapp.get('/help/')
        self.assertEqual(res.status, '200 OK')
        res.mustcontain('Frequently Asked Questions')

    def test_help_in_english(self):
        res = self.testapp.get('/help/', headers={
            'Accept-Language': 'en',
        })
        self.assertEqual(res.status, '200 OK')
        res.mustcontain('Frequently Asked Questions')

    def test_help_in_swedish(self):
        res = self.testapp.get('/help/', headers={
            'Accept-Language': 'sv',
        })
        self.assertEqual(res.status, '200 OK')
        res.mustcontain('Vart kan jag')

    def test_help_in_unknown_language(self):
        # Unknown languages fall back to English.
        res = self.testapp.get('/help/', headers={
            'Accept-Language': 'xx',
        })
        self.assertEqual(res.status, '200 OK')
        res.mustcontain('Frequently Asked Questions')


class SignupAppTest(FunctionalTests):
    """Shared base for full-signup tests; patches password generation."""

    def setUp(self):
        super(SignupAppTest, self).setUp()
        from eduid_signup import views
        # Make generated passwords deterministic for the duration of a test.
        mock_config = {
            'return_value': ('x', 'y'),
        }
        self.patcher = patch.object(views, 'generate_password', **mock_config)
        self.patcher.start()

    def tearDown(self):
        super(SignupAppTest, self).tearDown()
        self.patcher.stop()

    def _start_and_solve_captcha(self, email, check_captcha_post_result=True,
                                 userdb_count=2, signup_userdb_count=0):
        # Submit the signup form and go through the captcha step; returns the
        # captcha POST response for further inspection by the caller.
        home_post = self.testapp.post('/', {'email': email})
        self.assertEqual(home_post.status, '302 Found')
        self.assertEqual(home_post.location, 'http://localhost/trycaptcha/')

        # ensure known starting point
        self.assertEqual(self.amdb.db_count(), userdb_count)
        self.assertEqual(self.signup_userdb.db_count(), signup_userdb_count)

        captcha_get = self.testapp.get('/trycaptcha/')
        captcha_post = captcha_get.form.submit('foo')
        if check_captcha_post_result:
            self.assertEqual(captcha_post.status, '302 Found')
            self.assertEqual(captcha_post.location, 'http://localhost/success/')
        return captcha_post

    def _get_new_signup_user(self, email):
        # Fetch the not-yet-verified user created by the signup flow, or fail
        # the test if the pending address cannot be found.
        signup_user = self.signup_userdb.get_user_by_pending_mail_address(email)
        if not signup_user:
            self.fail("User could not be found using pending mail address")
        logger.debug("User in database after e-mail would have been sent:\n{!s}".format(
            pprint.pformat(signup_user.to_dict())
        ))
        return signup_user

    def _create_account(self, captcha_post):
        # Follow the post-captcha redirect and assert the account was created.
        res4 = self.testapp.get(captcha_post.location)
        self.assertEqual(res4.status, '200 OK')
        res4.mustcontain('Account created successfully')
        # Should be one user in the signup_userdb now
        self.assertEqual(self.amdb.db_count(), 2)
        self.assertEqual(self.signup_userdb.db_count(), 1)


class SNATests(SignupAppTest):
    """
    Tests of the complete signup process using Social Network site
    """

    def test_google_signup(self):
        # Verify known starting point (empty ToU database)
        self.assertEqual(self.amdb.db_count(), 2)
        self.assertEqual(self.signup_userdb.db_count(), 0)

        self._google_login(NEW_USER)
        rfi_post = self._review_fetched_info()

        # Verify there is now one user in the signup userdb
        self.assertEqual(self.amdb.db_count(), 2)
        self.assertEqual(self.signup_userdb.db_count(), 1)

        self._complete_registration(rfi_post)

        # Verify there is now one more user in the central eduid user database
        self.assertEqual(self.amdb.db_count(), 3)
        self.assertEqual(self.signup_userdb.db_count(), 0)

        user = self.amdb.get_user_by_mail(NEW_USER['email'])
        self.assertTrue(user.tou.has_accepted(self.settings['tou_version']))

    def test_google_existing_user(self):
        # Logging in with an already-registered e-mail must not create a user.
        self._google_login(EXISTING_USER)
        rfi_get = self._review_fetched_info_get()
        self.assertEqual(rfi_get.location, 'http://localhost/email_already_registered/')
        self.assertEqual(self.signup_userdb.db_count(), 0)

    def test_google_retry(self):
        # call the login to fill the session
        res1 = self.testapp.get('/google/login', {
            'next_url': 'https://localhost/foo/bar',
        })
        # now, retry
        self._google_login(NEW_USER)
        self._review_fetched_info()
        self.assertEqual(self.signup_userdb.db_count(), 1)

    def test_signup_with_good_email_and_then_google(self):
        captcha_post = self._start_and_solve_captcha(NEW_USER['email'].upper())
        self._create_account(captcha_post)
        self._get_new_signup_user(NEW_USER['email'])

        # Now, verify the signup process can be completed by the user
        # switching to the Social Network (google) track instead
        logger.debug("\n\nUser switching to Social signup instead\n\n")
        self._google_login(NEW_USER)
        rfi_post = self._review_fetched_info(userdb_count=2, signup_userdb_count=1)

        # Verify there is still one user in the signup userdb
        self.assertEqual(self.amdb.db_count(), 2)
        self.assertEqual(self.signup_userdb.db_count(), 1)

        self._complete_registration(rfi_post)

        # Verify there is now one more user in the central eduid user database
        self.assertEqual(self.amdb.db_count(), 3)

    def test_google_abort(self):
        self._google_login(NEW_USER)
        rfi_get = self._review_fetched_info_get()
        # Simulate clicking the Cancel button
        rfi_post = rfi_get.form.submit('cancel')
        # Check that the result was a redirect to /
        # NOTE(review): assertRegexpMatches is deprecated (use assertRegex).
        self.assertEqual(rfi_post.status, '302 Found')
        self.assertRegexpMatches(rfi_post.location, 'http://localhost/')
        # Verify no user has been created
        self.assertEqual(self.amdb.db_count(), 2)
        self.assertEqual(self.signup_userdb.db_count(), 0)

    def test_google_bad_request(self):
        # call the login to fill the session
        self._google_login(NEW_USER)
        self.add_to_session({'dummy': 'dummy'})
        res = self.testapp.get('/review_fetched_info/', status=400)
        self.assertEqual(self.signup_userdb.db_count(), 0)

    def test_google_cancel(self):
        # call the login to fill the session
        self._google_login(NEW_USER)
        res = self.testapp.get('/review_fetched_info/')
        self.assertEqual(self.signup_userdb.db_count(), 0)
        res = res.form.submit('cancel')
        self.assertEqual(res.status, '302 Found')
        self.assertEqual(self.signup_userdb.db_count(), 0)
        self.assertEqual(res.location, 'http://localhost/')

    def test_google_tou(self):
        # call the login to fill the session
        self._google_login(NEW_USER)
        res = self.testapp.get('/review_fetched_info/')
        res = res.form.submit('action')
        self.assertEqual(res.status, '302 Found')
        self.testapp.get(res.location)
        user = self.amdb.get_user_by_mail(NEW_USER['email'])
        self.assertTrue(user.tou.has_accepted(self.settings['tou_version']))

    def _google_callback(self, state, user):
        # Simulate Google's OAuth callback by mocking the token exchange
        # (requests.post) and the profile fetch (requests.get).
        with patch('requests.post') as fake_post:
            # taken from pyramid_sna
            fake_post.return_value.status_code = 200
            fake_post.return_value.json = lambda: {
                'access_token': '<PASSWORD>',
            }
            with patch('requests.get') as fake_get:
                fake_get.return_value.status_code = 200
                fake_get.return_value.json = lambda: user
                res = self.testapp.get('/google/callback', {
                    'code': '1234',
                    'state': state,
                })

    def _google_login(self, userdata):
        # call the login to fill the session
        res1 = self.testapp.get('/google/login', {
            'next_url': 'https://localhost/foo/bar',
        })
        #
        # Check that the result was a redirect to Google OAUTH endpoint
        # NOTE(review): the trailing '?' in this regex is an (unescaped)
        # quantifier making the final 'h' optional — harmless here, but
        # probably meant as a literal '\?'.
        self.assertEqual(res1.status, '302 Found')
        self.assertRegexpMatches(res1.location, '^https://accounts.google.com/o/oauth2/auth?')
        url = urlparse.urlparse(res1.location)
        query = urlparse.parse_qs(url.query)
        state = query['state'][0]
        self._google_callback(state, userdata)

    def _review_fetched_info(self, userdb_count=2, signup_userdb_count=0):
        """
        Perform both the GET and subsequent POST steps of `review_fetched_info'.
        """
        rfi_get = self._review_fetched_info_get(userdb_count=userdb_count,
                                                signup_userdb_count=signup_userdb_count)
        return self._review_fetched_info_post(rfi_get)

    def _review_fetched_info_get(self, userdb_count=2, signup_userdb_count=0):
        # ensure known starting point
        self.assertEqual(self.amdb.db_count(), userdb_count)
        self.assertEqual(self.signup_userdb.db_count(), signup_userdb_count)
        rfi_get = self.testapp.get('/review_fetched_info/')
        # The GET itself must not change either database.
        self.assertEqual(self.amdb.db_count(), userdb_count)
        self.assertEqual(self.signup_userdb.db_count(), signup_userdb_count)
        return rfi_get

    def _review_fetched_info_post(self, rfi_get):
        rfi_post = rfi_get.form.submit('action')
        # Check that the result was a redirect to /sna_account_created/
        self.assertEqual(rfi_post.status, '302 Found')
        self.assertRegexpMatches(rfi_post.location, '/sna_account_created/')
        return rfi_post

    def _complete_registration(self, rfi_post):
        # Finish registration with the credential backend mocked out, then
        # wait briefly for the attribute-manager plugin to clean up.
        from vccs_client import VCCSClient
        with patch.object(VCCSClient, 'add_credentials', clear=True):
            VCCSClient.add_credentials.return_value = 'faked while testing'
            res = self.testapp.get(rfi_post.location)
            res.mustcontain('You can now log in')
        for retry in range(3):
            time.sleep(0.1)
            if self.signup_userdb.db_count() == 0:
                # User was removed from SignupUserDB by attribute manager plugin after
                # the new user was properly synced to the central UserDB - all done
                break
        if self.signup_userdb.db_count():
            self.fail('SignupUserDB user count never went back to zero')
        return res


class SignupEmailTests(SignupAppTest):
    """
    Test of the complete signup process using an e-mail address
    """

    def test_signup_with_good_email(self):
        self._start_and_solve_captcha(NEW_USER['email'].upper())

        # Should be one user in the signup_userdb now
        self.assertEqual(self.amdb.db_count(), 2)
        self.assertEqual(self.signup_userdb.db_count(), 1)

        user = self._get_new_signup_user(NEW_USER['email'])

        from vccs_client import VCCSClient
        with patch.object(VCCSClient, 'add_credentials', clear=True):
            VCCSClient.add_credentials.return_value = 'faked while testing'
            # Visit the confirmation LINK to confirm the e-mail address
            verify_link = "/email_verification/{code!s}/".format(code = user.pending_mail_address.verification_code)
            res4 = self.testapp.get(verify_link)
            self.assertEqual(res4.status, '200 OK')
            res4.mustcontain('You can now log in')

    def test_signup_with_existing_email(self):
        captcha_post = self._start_and_solve_captcha(EXISTING_USER['email'],
                                                     check_captcha_post_result=False)
        self.assertEqual(captcha_post.status, '302 Found')
        self.assertEqual(captcha_post.location, 'http://localhost/email_already_registered/')

        # Should NOT have created any new user
        self.assertEqual(self.amdb.db_count(), 2)
        self.assertEqual(self.signup_userdb.db_count(), 0)

        res = self.testapp.get(captcha_post.location)
        self.assertEqual(res.status, '200 OK')
        res.mustcontain('Email address already in use')

    def test_signup_with_good_email_twice(self):
        captcha_post = self._start_and_solve_captcha('<EMAIL>')
        self._create_account(captcha_post)
        user1 = self._get_new_signup_user('<EMAIL>')

        logger.debug("\n\nSignup AGAIN\n\n")

        # Sign up again, with same e-mail
        captcha_post2 = self._start_and_solve_captcha('<EMAIL>',
                                                      check_captcha_post_result=False,
                                                      userdb_count=2,
                                                      signup_userdb_count=1,
                                                      )
        res4 = self.testapp.get(captcha_post2.location)
        self.assertEqual(res4.status, '200 OK')
        res4.mustcontain('Email address already in use')
        res5 = res4.form.submit('foo')
        self.assertEqual(res5.status, '302 Found')
        self.assertEqual(res5.location, 'http://localhost/success/')

        # Check that users pending mail address has been updated with a new verification code
        user2 = self._get_new_signup_user('<EMAIL>')
        self.assertEqual(user1.user_id, user2.user_id)
        self.assertEqual(user1.pending_mail_address.email, user2.pending_mail_address.email)
        self.assertNotEqual(user1.pending_mail_address.verification_code,
                            user2.pending_mail_address.verification_code)

    def test_signup_with_good_email_and_wrong_code(self):
        captcha_post = self._start_and_solve_captcha('<EMAIL>')
        self._create_account(captcha_post)

        # Visit the confirmation LINK to try to confirm the e-mail address
        # using the wrong code in the URL. Note that it's unlikely that the
        # user provided the wrong code by simply clicking on the LINK
        # the first time and more likely that the user sent the "wrong"-code
        # by clicking on the LINK a second time.
        verify_link = "/email_verification/{code!s}/".format(code = 'not-the-right-code-in-link')
        res4 = self.testapp.get(verify_link)
        self.assertEqual(res4.status, '200 OK')
        res4.mustcontain('You have probably already signed up')

    def test_signup_confirm_using_form(self):
        captcha_post = self._start_and_solve_captcha('<EMAIL>')
        self._create_account(captcha_post)
        user = self._get_new_signup_user('<EMAIL>')

        from vccs_client import VCCSClient
        with patch.object(VCCSClient, 'add_credentials', clear=True):
            VCCSClient.add_credentials.return_value = 'faked while testing'
            # Visit the confirmation FORM to confirm the e-mail address
            res5 = self.testapp.get('/verification_code_form/')
            self.assertEqual(res5.status, '200 OK')
            res5.mustcontain('verification-code-input')
            res5.forms[0]['code'] = user.pending_mail_address.verification_code
            res6 = res5.forms[0].submit('foo')
            self.assertEqual(res6.status, '200 OK')
            res6.mustcontain('You can now log in')


class MockCapchaTests(FunctionalTests):
    """Tests with the captcha verifier and credential backend mocked."""

    mock_users_patches = []

    def setUp(self):
        super(MockCapchaTests, self).setUp()
        self.testapp.app.registry.settings['recaptcha_public_key'] = 'key'
        # Make every captcha attempt succeed.
        self.patcher_captcha = patch('eduid_signup.views.verify_recaptcha')
        self.captcha_mock = self.patcher_captcha.start()
        self.captcha_mock.return_value = True

        from eduid_signup.vccs import vccs_client

        # Stand-in credential client that always reports success.
        class MockClient:
            def add_credentials(self, *args, **kwargs):
                return True

        mock_config2 = {
            'return_value': MockClient(),
        }
        self.patcher_vccs = patch.object(vccs_client, 'VCCSClient', **mock_config2)
        self.patcher_vccs.start()

    def tearDown(self):
        super(MockCapchaTests, self).tearDown()
        self.patcher_captcha.stop()
        self.patcher_vccs.stop()

    def _start_registration(self, email='<EMAIL>'):
        # Run the signup form + (mocked) captcha and return the pending,
        # not-yet-verified signup user.
        res = self.testapp.post('/', {'email': email})
        self.assertEqual(res.status, '302 Found')
        self.assertEqual(res.location, 'http://localhost/trycaptcha/')
        res = self.testapp.get(res.location)
        self.assertEqual(self.signup_userdb.db_count(), 0)
        res = res.form.submit()
        self.assertEqual(res.status, '302 Found')
        self.assertEqual(res.location, 'http://localhost/success/')
        self.assertEqual(self.signup_userdb.db_count(), 1)
        registered = self.signup_userdb.get_user_by_pending_mail_address(email)
        self.assertEqual(registered.pending_mail_address.is_verified, False)
        return registered

    def _complete_registration(self, email='<EMAIL>'):
        # Start registration, then follow the e-mail verification link and
        # return the user as stored in the central user database.
        registered = self._start_registration(email=email)
        code = registered.pending_mail_address.verification_code
        url = 'http://localhost/email_verification/%s/' % code
        res = self.testapp.get(url)
        self.assertEqual(res.status, '200 OK')
        new_user = self.amdb.get_user_by_mail(email)
        return new_user

    def test_set_invalid_method(self):
        # NOTE(review): source truncated here — the rest of this test (and any
        # following code) is not visible in this chunk.
        self.add_to_session({'email': '<EMAIL>'})
#!/usr/bin/env python
"""A keyword index of client machines.

An index of client machines, associating likely identifiers to client IDs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from future.builtins import map
from future.builtins import range
from future.utils import iteritems
from future.utils import itervalues
from future.utils import string_types
from typing import Text

from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.util import precondition
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import keyword_index
from grr_response_server.aff4_objects import aff4_grr


def CreateClientIndex(token=None):
  """Opens (creating if needed) the AFF4-backed client keyword index."""
  return aff4.FACTORY.Create(
      rdfvalue.RDFURN("aff4:/client_index"),
      aff4_type=AFF4ClientIndex,
      mode="rw",
      object_exists=True,
      token=token)


class AFF4ClientIndex(keyword_index.AFF4KeywordIndex):
  """An index of client machines."""

  # Keyword prefixes that carry a time filter rather than a search term.
  START_TIME_PREFIX = "start_date:"
  START_TIME_PREFIX_LEN = len(START_TIME_PREFIX)
  END_TIME_PREFIX = "end_date:"
  END_TIME_PREFIX_LEN = len(END_TIME_PREFIX)

  # We accept and return client URNs, but store client ids,
  # e.g. "C.00aaeccbb45f33a3".

  def _ClientIdFromURN(self, urn):
    # The basename of a client URN is the bare client id.
    return urn.Basename()

  def _NormalizeKeyword(self, keyword):
    # We're not sure about the type here, so converting the string to
    # unicode first to lower it properly and then converting it back
    # to utf-8 bytestring, since it's what the rest of the code is
    # expecting.
    # TODO(user): deprecate this code and make sure that ClientIndex
    # implementation below doesn't rely on such hacks.
    return utils.SmartUnicode(keyword).lower()

  def _AnalyzeKeywords(self, keywords):
    """Splits keywords into time filters, search terms and '+' terms.

    A leading '+' marks a keyword as "unversioned": it must match on the
    latest data only (see LookupClients).
    """
    # Default search window: the last 180 days up to LAST_TIMESTAMP.
    start_time = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("180d")
    end_time = rdfvalue.RDFDatetime(self.LAST_TIMESTAMP)
    filtered_keywords = []
    unversioned_keywords = []

    for k in keywords:
      if k.startswith(self.START_TIME_PREFIX):
        try:
          start_time = rdfvalue.RDFDatetime.FromHumanReadable(
              k[self.START_TIME_PREFIX_LEN:])
        except ValueError:
          # Unparseable dates silently keep the default window.
          pass
      elif k.startswith(self.END_TIME_PREFIX):
        try:
          end_time = rdfvalue.RDFDatetime.FromHumanReadable(
              k[self.END_TIME_PREFIX_LEN:], eoy=True)
        except (TypeError, ValueError):
          pass
      elif k[0] == "+":
        kw = k[1:]
        filtered_keywords.append(kw)
        unversioned_keywords.append(kw)
      else:
        filtered_keywords.append(k)

    if not filtered_keywords:
      # "." is the universal keyword attached to every client (see
      # AnalyzeClient), so an empty query matches everything in the window.
      filtered_keywords.append(".")

    return start_time, end_time, filtered_keywords, unversioned_keywords

  def LookupClients(self, keywords):
    """Returns a list of client URNs associated with keywords.

    Args:
      keywords: The list of keywords to search by.

    Returns:
      A list of client URNs.

    Raises:
      ValueError: A string (single keyword) was passed instead of an iterable.
    """
    if isinstance(keywords, string_types):
      raise ValueError(
          "Keywords should be an iterable, not a string (got %s)." % keywords)

    start_time, end_time, filtered_keywords, unversioned_keywords = (
        self._AnalyzeKeywords(keywords))

    # last_seen_map is only needed when '+' keywords require freshness checks.
    last_seen_map = None
    if unversioned_keywords:
      last_seen_map = {}

    # TODO(user): Make keyword index datetime aware so that
    # AsMicrosecondsSinceEpoch is unnecessary.
    raw_results = self.Lookup(
        list(map(self._NormalizeKeyword, filtered_keywords)),
        start_time=start_time.AsMicrosecondsSinceEpoch(),
        end_time=end_time.AsMicrosecondsSinceEpoch(),
        last_seen_map=last_seen_map)
    if not raw_results:
      return []

    if unversioned_keywords:
      # Drop results whose '+' keyword hit is older than the client's most
      # recent index entry — i.e. the keyword no longer applies.
      universal_last_seen_raw = {}
      self.ReadPostingLists(
          list(map(self._NormalizeKeyword, raw_results)),
          start_time=start_time.AsMicrosecondsSinceEpoch(),
          end_time=end_time.AsMicrosecondsSinceEpoch(),
          last_seen_map=universal_last_seen_raw)

      universal_last_seen = {}
      for (_, client_id), ts in iteritems(universal_last_seen_raw):
        universal_last_seen[client_id] = ts

      old_results = set()
      for keyword in unversioned_keywords:
        for result in raw_results:
          if last_seen_map[(keyword, result)] < universal_last_seen[result]:
            old_results.add(result)
      raw_results -= old_results

    return [rdf_client.ClientURN(result) for result in raw_results]

  def ReadClientPostingLists(self, keywords):
    """Looks up all clients associated with any of the given keywords.

    Args:
      keywords: A list of keywords we are interested in.

    Returns:
      A dict mapping each keyword to a list of matching clients.
    """
    start_time, end_time, filtered_keywords, _ = self._AnalyzeKeywords(keywords)

    # TODO(user): Make keyword index datetime aware so that
    # AsMicrosecondsSinceEpoch is unecessary.
    return self.ReadPostingLists(
        filtered_keywords,
        start_time=start_time.AsMicrosecondsSinceEpoch(),
        end_time=end_time.AsMicrosecondsSinceEpoch())

  def AnalyzeClient(self, client):
    """Finds the client_id and keywords for a client.

    Args:
      client: A VFSGRRClient record to find keywords for.

    Returns:
      A tuple (client_id, keywords) where client_id is the client identifier
      and keywords is a list of keywords related to client.
    """
    client_id = self._ClientIdFromURN(client.urn)

    # Start with both the client id itself, and a universal keyword, used to
    # find all clients.
    #
    # TODO(user): Remove the universal keyword once we have a better way
    # to do this, i.e., once we have a storage library which can list all
    # clients directly.
    keywords = [self._NormalizeKeyword(client_id), "."]

    def TryAppend(prefix, keyword):
      # Adds both the bare keyword and its "prefix:keyword" form (when a
      # prefix is given); empty/None keywords are ignored.
      precondition.AssertType(prefix, Text)
      if keyword:
        keyword_string = self._NormalizeKeyword(Text(keyword))
        keywords.append(keyword_string)
        if prefix:
          keywords.append(prefix + ":" + keyword_string)

    def TryAppendPrefixes(prefix, keyword, delimiter):
      # Also indexes every leading delimiter-separated prefix of the keyword
      # (e.g. "a.b.c" -> "a", "a.b", "a.b.c"); returns the segment count.
      if keyword is None:
        return 0
      TryAppend(prefix, keyword)
      segments = keyword.split(delimiter)
      for i in range(1, len(segments)):
        TryAppend(prefix, delimiter.join(segments[0:i]))
      return len(segments)

    def TryAppendIP(ip):
      TryAppend("ip", ip)
      # IP4v?
      if TryAppendPrefixes("ip", str(ip), ".") == 4:
        return
      # IP6v?
      TryAppendPrefixes("ip", str(ip), ":")

    def TryAppendMac(mac):
      TryAppend("mac", mac)
      if len(mac) == 12:
        # If looks like a mac address without ":" symbols, also add the keyword
        # with them.
        TryAppend("mac", ":".join([mac[i:i + 2] for i in range(0, 12, 2)]))

    s = client.Schema
    TryAppend("host", client.Get(s.HOSTNAME))
    TryAppendPrefixes("host", client.Get(s.HOSTNAME), "-")
    TryAppend("host", client.Get(s.FQDN))
    TryAppendPrefixes("host", client.Get(s.FQDN), ".")
    TryAppend("", client.Get(s.SYSTEM))
    TryAppend("", client.Get(s.UNAME))
    TryAppend("", client.Get(s.OS_RELEASE))
    TryAppend("", client.Get(s.OS_VERSION))
    TryAppend("", client.Get(s.KERNEL))
    TryAppend("", client.Get(s.ARCH))

    kb = client.Get(s.KNOWLEDGE_BASE)
    if kb:
      for user in kb.users:
        TryAppend("user", user.username)
        TryAppend("", user.full_name)
        if user.full_name:
          for name in user.full_name.split():
            # full_name often includes nicknames and similar, wrapped in
            # punctuation, e.g. "Thomas 'TJ' Jones". We remove the most common
            # wrapping characters.
            TryAppend("", name.strip("\"'()"))

    for username in client.Get(s.USERNAMES, []):
      TryAppend("user", username)

    for interface in client.Get(s.INTERFACES, []):
      if interface.mac_address:
        TryAppendMac(interface.mac_address.human_readable_address)
      for ip in interface.GetIPAddresses():
        TryAppendIP(ip)

    # We should have all mac and ip addresses already, but some test data only
    # has it attached directly, so just in case we look there also.
    if client.Get(s.MAC_ADDRESS):
      for mac in str(client.Get(s.MAC_ADDRESS)).split("\n"):
        TryAppendMac(mac)
    ip_list = client.Get(s.HOST_IPS, "")
    for ip in str(ip_list).split("\n"):
      TryAppendIP(ip)

    client_info = client.Get(s.CLIENT_INFO)
    if client_info:
      TryAppend("client", client_info.client_name)
      TryAppend("client", client_info.client_version)
      if client_info.labels:
        for label in client_info.labels:
          TryAppend("label", label)

    for label in client.GetLabelsNames():
      TryAppend("label", label)

    return client_id, keywords

  def AddClient(self, client):
    """Adds a client to the index.

    Args:
      client: A VFSGRRClient record to add or update.
    """
    client_id, keywords = self.AnalyzeClient(client)
    self.AddKeywordsForName(client_id, keywords)

  def RemoveClientLabels(self, client):
    """Removes all labels for a given client object.

    Args:
      client: A VFSGRRClient record.
    """
    keywords = []
    for label in client.GetLabelsNames():
      keyword = self._NormalizeKeyword(utils.SmartStr(label))
      # This might actually delete a keyword with the same name as the label (if
      # there is one). Usually the client keywords will be rebuilt after the
      # deletion of the old labels though, so this can only destroy historic
      # index data; normal search functionality will not be affected.
      keywords.append(keyword)
      keywords.append("label:%s" % keyword)

    self.RemoveKeywordsForName(self._ClientIdFromURN(client.urn), keywords)


def GetClientURNsForHostnames(hostnames, token=None):
  """Gets all client_ids for a given list of hostnames or FQDNS.

  Args:
    hostnames: A list of hostnames / FQDNs.
    token: An ACL token.

  Returns:
    A dict with a list of all known GRR client_ids for each hostname.
  """
  # Choose the relational-DB index or the legacy AFF4 index depending on the
  # deployment's configuration.
  if data_store.RelationalDBReadEnabled():
    index = ClientIndex()
  else:
    index = CreateClientIndex(token=token)

  keywords = set()
  for hostname in hostnames:
    if hostname.startswith("host:"):
      keywords.add(hostname)
    else:
      keywords.add("host:%s" % hostname)
  results = index.ReadClientPostingLists(keywords)

  result = {}
  for keyword, hits in iteritems(results):
    # Strip the "host:" prefix so callers get back the bare hostname as key.
    result[keyword[len("host:"):]] = hits
  return result


def GetMostRecentClient(client_list, token=None):
  """Return most recent client from list of clients."""
  last = rdfvalue.RDFDatetime(0)
  client_urn = None
  for client in aff4.FACTORY.MultiOpen(client_list, token=token):
    client_last = client.Get(client.Schema.LAST)
    # Keep the client with the newest LAST (check-in) timestamp.
    if client_last > last:
      last = client_last
      client_urn = client.urn
  return client_urn


class ClientIndex(object):
  """An index of client machines."""

  # Keyword prefix carrying a start-time filter (no end-time filter in this
  # relational-DB implementation, unlike AFF4ClientIndex above).
  START_TIME_PREFIX = "start_date:"
  START_TIME_PREFIX_LEN = len(START_TIME_PREFIX)

  def _NormalizeKeyword(self, keyword):
    return Text(keyword).lower()

  def _AnalyzeKeywords(self, keywords):
    """Extracts a start time from a list of keywords if present."""
    # Default search window: the last 180 days.
    start_time = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("180d")
    filtered_keywords = []

    for k in keywords:
      if k.startswith(self.START_TIME_PREFIX):
        try:
          start_time = rdfvalue.RDFDatetime.FromHumanReadable(
              k[self.START_TIME_PREFIX_LEN:])
        except ValueError:
          pass
      else:
        filtered_keywords.append(k)

    if not filtered_keywords:
      # "." is the universal keyword attached to every client.
      filtered_keywords.append(".")

    return start_time, filtered_keywords

  def LookupClients(self, keywords):
    """Returns a list of client URNs associated with keywords.

    Args:
      keywords: The list of keywords to search by.

    Returns:
      A list of client URNs.

    Raises:
      ValueError: A string (single keyword) was passed instead of an iterable.
    """
    if isinstance(keywords, string_types):
      raise ValueError(
          "Keywords should be an iterable, not a string (got %s)." % keywords)

    start_time, filtered_keywords = self._AnalyzeKeywords(keywords)

    keyword_map = data_store.REL_DB.ListClientsForKeywords(
        list(map(self._NormalizeKeyword, filtered_keywords)),
        start_time=start_time)

    # Intersect the posting lists of all keywords; bail out early when the
    # intersection becomes empty.
    results = itervalues(keyword_map)
    relevant_set = set(next(results))

    for hits in results:
      relevant_set &= set(hits)
      if not relevant_set:
        return []

    return sorted(relevant_set)

  def ReadClientPostingLists(self, keywords):
    """Looks up all clients associated with any of the given keywords.

    Args:
      keywords: A list of keywords we are interested in.

    Returns:
      A dict mapping each keyword to a list of matching clients.
    """
    start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
    return data_store.REL_DB.ListClientsForKeywords(
        filtered_keywords, start_time=start_time)

  def AnalyzeClient(self, client):
    """Finds the client_id and keywords for a client.

    Args:
      client: A Client object record to find keywords for.

    Returns:
      A list of keywords related to client.
    """
    # NOTE(review): source truncated below — the remainder of this method (and
    # possibly more of the file) is not visible in this chunk.
    # Start with a universal keyword, used to find all clients.
    #
    # TODO(user): Remove the universal keyword once we have a better way
    # to do this, i.e., once we
extension), then the # path should be rejected, otherwise it should validate successfully. If # the path is accepted, the application should display the user notice # associated with anyPolicy in the intermediate certificate TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]), ], '4.11.1': [ # Invalid inhibitPolicyMapping Test1 # Procedure: Validate Invalid inhibitPolicyMapping Test1 EE using the # default settings or open and verify Signed Test Message 6.2.2.108 using # the default settings. # # Expected Result: The authorities-constrained-policy-set and the # user-constrained-policy-set will be empty. The explicit-policy-indicator # will be set. The path should not validate successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.11.2': [ # Valid inhibitPolicyMapping Test2 # Procedure: Validate Valid inhibitPolicyMapping Test2 EE using the default # settings or open and verify Signed Test Message 6.2.2.109 using the # default settings. # # Expected Result: The authorities-constrained-policy-set will be # {NIST-test-policy-1} and the explicit-policy-indicator will be set. If # the initial-policy-set is any-policy or otherwise includes # NIST-test-policy-1, then the path should validate successfully. TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]), ], '4.11.3': [ # Invalid inhibitPolicyMapping Test3 # Procedure: Validate Invalid inhibitPolicyMapping Test3 EE using the # default settings or open and verify Signed Test Message 6.2.2.110 using # the default settings. # # Expected Result: The authorities-constrained-policy-set and the # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set. The path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.11.4': [ # Valid inhibitPolicyMapping Test4 # Procedure: Validate Valid inhibitPolicyMapping Test4 EE using the default # settings or open and verify Signed Test Message 6.2.2.111 using the # default settings. 
# # Expected Result: The authorities-constrained-policy-set will be # {NIST-test-policy-2} and the explicit-policy-indicator will be set. If # the initial-policy-set is any-policy or otherwise includes # NIST-test-policy-2, then the path should validate successfully. TestInfo(True, user_constrained_policy_set=[TEST_POLICY_2]), ], '4.11.5': [ # Invalid inhibitPolicyMapping Test5 # Procedure: Validate Invalid inhibitPolicyMapping Test5 EE using the # default settings or open and verify Signed Test Message 6.2.2.112 using # the default settings. # # Expected Result: The authorities-constrained-policy-set and the # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set. The path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.11.6': [ # Invalid inhibitPolicyMapping Test6 # Procedure: Validate Invalid inhibitPolicyMapping Test6 EE using the # default settings or open and verify Signed Test Message 172.16.31.103 using # the default settings. # # Expected Result: The authorities-constrained-policy-set and the # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set. The path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.11.7': [ # Valid Self-Issued inhibitPolicyMapping Test7 # Procedure: Validate Valid Self-Issued inhibitPolicyMapping Test7 EE using # the default settings or open and verify Signed Test Message 172.16.17.32 # using the default settings. # # Expected Result: The authorities-constrained-policy-set will be # {NIST-test-policy-1} and the explicit-policy-indicator will be set. If # the initial-policy-set is any-policy or otherwise includes # NIST-test-policy-1, then the path should validate successfully. 
TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]), ], '4.11.8': [ # Invalid Self-Issued inhibitPolicyMapping Test8 # Procedure: Validate Invalid Self-Issued inhibitPolicyMapping Test8 EE # using the default settings or open and verify Signed Test Message # 172.16.58.3 using the default settings. # # Expected Result: The authorities-constrained-policy-set and # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set. The path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.11.9': [ # Invalid Self-Issued inhibitPolicyMapping Test9 # Procedure: Validate Invalid Self-Issued inhibitPolicyMapping Test9 EE # using the default settings or open and verify Signed Test Message # 172.16.31.106 using the default settings. # # Expected Result: The authorities-constrained-policy-set and # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set. The path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.11.10': [ # Invalid Self-Issued inhibitPolicyMapping Test10 # Procedure: Validate Invalid Self-Issued inhibitPolicyMapping Test10 EE # using the default settings or open and verify Signed Test Message # 172.16.17.32 using the default settings. # # Expected Result: The authorities-constrained-policy-set and # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set. The path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.11.11': [ # Invalid Self-Issued inhibitPolicyMapping Test11 # Procedure: Validate Invalid Self-Issued inhibitPolicyMapping Test11 EE # using the default settings or open and verify Signed Test Message # 172.16.58.3 using the default settings. # # Expected Result: The authorities-constrained-policy-set and # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set. 
The path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.12.1': [ # Invalid inhibitAnyPolicy Test1 # Procedure: Validate Invalid inhibitAnyPolicy Test1 EE using the default # settings or open and verify Signed Test Message 172.16.17.32 using the # default settings. # # Expected Result: The authorities-constrained-policy-set and # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set (if the application can process the # policyConstraints extension). If the application can process the # policyConstraints extension, then the path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.12.2': [ # Valid inhibitAnyPolicy Test2 # Procedure: Validate Valid inhibitAnyPolicy Test2 EE using the default # settings or open and verify Signed Test Message 6.2.2.120 using the # default settings. # # Expected Result: The authorities-constrained-policy-set will be # {NIST-test-policy-1} and the explicit-policy-indicator will be set (if # the application can process the policyConstraints extension). If the # initial-policy-set is any-policy or otherwise includes # NIST-test-policy-1, then the user-constrained-policy-set will be # {NIST-test-policy-1} and the path should validate successfully. If not, # then the user-constrained-policy-set will be empty. If the # user-constrained-policy-set is empty and the application can process the # policyConstraints extension, then the path should not validate # successfully. TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]), ], '4.12.3': [ # inhibitAnyPolicy Test3 # 1. default settings. The path should validate successfully. TestInfo(True, user_constrained_policy_set=[TEST_POLICY_1]), # 2. default settings, but with initial-inhibit-any-policy set. The path # should not validate successfully. 
TestInfo(False, initial_inhibit_any_policy=True, user_constrained_policy_set=[]), ], '4.12.4': [ # Invalid inhibitAnyPolicy Test4 # Procedure: Validate Invalid inhibitAnyPolicy Test4 EE using the default # settings or open and verify Signed Test Message 6.2.2.122 using the # default settings. # # Expected Result: The authorities-constrained-policy-set and # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set (if the application can process the # policyConstraints extension). If the application can process the # policyConstraints extension, then the path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.12.5': [ # Invalid inhibitAnyPolicy Test5 # Procedure: Validate Invalid inhibitAnyPolicy Test5 EE using the default # settings or open and verify Signed Test Message 6.2.2.123 using the # default settings. # # Expected Result: The authorities-constrained-policy-set and # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set (if the application can process the # policyConstraints extension). If the application can process the # policyConstraints extension, then the path should not validate # successfully. TestInfo(False, user_constrained_policy_set=[]), ], '4.12.6': [ # Invalid inhibitAnyPolicy Test6 # Procedure: Validate Invalid inhibitAnyPolicy Test6 EE using the default # settings or open and verify Signed Test Message 6.2.2.124 using the # default settings. # # Expected Result: The authorities-constrained-policy-set and # user-constrained-policy-set will be empty and the # explicit-policy-indicator will be set (if the application can process the # policyConstraints extension). If the application can process the # policyConstraints extension, then the path should not validate # successfully. 
TestInfo(False, user_constrained_policy_set=[]), ], '4.12.7': [ # Valid Self-Issued inhibitAnyPolicy Test7 # Procedure: Validate Valid Self-Issued inhibitAnyPolicy Test7 EE using the # default settings or open and verify Signed Test Message 6.2.2.125 using # the default settings. # # Expected Result: The authorities-constrained-policy-set will be # {NIST-test-policy-1} and the explicit-policy-indicator will be set (if # the application can process the policyConstraints extension). If the # initial-policy-set is any-policy or otherwise includes # NIST-test-policy-1, then the user-constrained-policy-set will be # {NIST-test-policy-1} and the path should validate successfully. If not,
<reponame>patmloi/PalettePal<gh_stars>1000+ # Copyright 2021, <NAME>, mailto:<EMAIL> # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Nodes for try/except/finally handling. This is the unified low level solution to trying a block, and executing code when it returns, break, continues, or raises an exception. See Developer Manual for how this maps to try/finally and try/except as in Python. 
""" from nuitka.Errors import NuitkaOptimizationError from nuitka.optimizations.TraceCollections import TraceCollectionBranch from .Checkers import checkStatementsSequence, checkStatementsSequenceOrNone from .NodeBases import StatementChildrenHavingBase from .StatementNodes import StatementsSequence class StatementTry(StatementChildrenHavingBase): kind = "STATEMENT_TRY" named_children = ( "tried", "except_handler", "break_handler", "continue_handler", "return_handler", ) checkers = { "tried": checkStatementsSequence, "except_handler": checkStatementsSequenceOrNone, "break_handler": checkStatementsSequenceOrNone, "continue_handler": checkStatementsSequenceOrNone, "return_handler": checkStatementsSequenceOrNone, } __slots__ = ("tried_may_raise",) def __init__( self, tried, except_handler, break_handler, continue_handler, return_handler, source_ref, ): StatementChildrenHavingBase.__init__( self, values={ "tried": tried, "except_handler": except_handler, "break_handler": break_handler, "continue_handler": continue_handler, "return_handler": return_handler, }, source_ref=source_ref, ) self.tried_may_raise = None def getDetailsForDisplay(self): return {"aborting": self.isStatementAborting()} def computeStatement(self, trace_collection): # This node has many children to handle, pylint: disable=I0021,too-many-branches,too-many-locals,too-many-statements tried = self.subnode_tried except_handler = self.subnode_except_handler break_handler = self.subnode_break_handler continue_handler = self.subnode_continue_handler return_handler = self.subnode_return_handler # The tried block must be considered as a branch, if it is not empty # already. 
collection_start = TraceCollectionBranch( parent=trace_collection, name="try start" ) abort_context = trace_collection.makeAbortStackContext( catch_breaks=break_handler is not None, catch_continues=continue_handler is not None, catch_returns=return_handler is not None, catch_exceptions=True, ) with abort_context: # As a branch point for the many types of handlers. result = tried.computeStatementsSequence(trace_collection=trace_collection) # We might be done entirely already. if result is None: return None, "new_statements", "Removed now empty try statement." # Might be changed. if result is not tried: self.setChild("tried", result) tried = result break_collections = trace_collection.getLoopBreakCollections() continue_collections = trace_collection.getLoopContinueCollections() return_collections = trace_collection.getFunctionReturnCollections() exception_collections = trace_collection.getExceptionRaiseCollections() # Not raising never turns into raising, but None (never calculated) and True # may no longer be true. if self.tried_may_raise is not False: self.tried_may_raise = tried.mayRaiseException(BaseException) # Exception handling is useless if no exception is to be raised. if not self.tried_may_raise: if except_handler is not None: except_handler.finalize() self.clearChild("except_handler") trace_collection.signalChange( tags="new_statements", message="Removed useless exception handler.", source_ref=except_handler.source_ref, ) except_handler = None # If tried may raise, even empty exception handler has a meaning to # ignore that exception. if self.tried_may_raise: collection_exception_handling = TraceCollectionBranch( parent=collection_start, name="except handler" ) # When no exception exits are there, this is a problem, we just # found an inconsistency that is a bug. 
if not exception_collections: for statement in tried.subnode_statements: if statement.mayRaiseException(BaseException): raise NuitkaOptimizationError( "This statement does raise but didn't annotate an exception exit.", statement, ) raise NuitkaOptimizationError( "Falsely assuming tried block may raise, but no statement says so.", tried, ) collection_exception_handling.mergeMultipleBranches(exception_collections) if except_handler is not None: result = except_handler.computeStatementsSequence( trace_collection=collection_exception_handling ) # Might be changed. if result is not except_handler: self.setChild("except_handler", result) except_handler = result if break_handler is not None: if not tried.mayBreak(): break_handler.finalize() self.clearChild("break_handler") break_handler = None if break_handler is not None: collection_break = TraceCollectionBranch( parent=collection_start, name="break handler" ) collection_break.mergeMultipleBranches(break_collections) result = break_handler.computeStatementsSequence( trace_collection=collection_break ) # Might be changed. if result is not break_handler: self.setChild("break_handler", result) break_handler = result if continue_handler is not None: if not tried.mayContinue(): continue_handler.finalize() self.clearChild("continue_handler") continue_handler = None if continue_handler is not None: collection_continue = TraceCollectionBranch( parent=collection_start, name="continue handler" ) collection_continue.mergeMultipleBranches(continue_collections) result = continue_handler.computeStatementsSequence( trace_collection=collection_continue ) # Might be changed. 
if result is not continue_handler: self.setChild("continue_handler", result) continue_handler = result if return_handler is not None: if not tried.mayReturn(): return_handler.finalize() self.clearChild("return_handler") return_handler = None if return_handler is not None: collection_return = TraceCollectionBranch( parent=collection_start, name="return handler" ) collection_return.mergeMultipleBranches(return_collections) result = return_handler.computeStatementsSequence( trace_collection=collection_return ) # Might be changed. if result is not return_handler: self.setChild("return_handler", result) return_handler = result # Check for trivial return handlers that immediately return, they can # just be removed. if return_handler is not None: if return_handler.subnode_statements[0].isStatementReturnReturnedValue(): return_handler.finalize() self.clearChild("return_handler") return_handler = None # Merge exception handler only if it is used. Empty means it is not # aborting, as it swallows the exception. if self.tried_may_raise and ( except_handler is None or not except_handler.isStatementAborting() ): trace_collection.mergeBranches( collection_yes=collection_exception_handling, collection_no=None ) # An empty exception handler means we have to swallow exception. if ( ( not self.tried_may_raise or ( except_handler is not None and except_handler.subnode_statements[ 0 ].isStatementReraiseException() ) ) and break_handler is None and continue_handler is None and return_handler is None ): return tried, "new_statements", "Removed useless try, all handlers removed." 
tried_statements = tried.subnode_statements pre_statements = [] while tried_statements: tried_statement = tried_statements[0] if tried_statement.mayRaiseException(BaseException): break if break_handler is not None and tried_statement.mayBreak(): break if continue_handler is not None and tried_statement.mayContinue(): break if return_handler is not None and tried_statement.mayReturn(): break pre_statements.append(tried_statement) tried_statements = list(tried_statements) del tried_statements[0] post_statements = [] if except_handler is not None and except_handler.isStatementAborting(): while tried_statements: tried_statement = tried_statements[-1] if tried_statement.mayRaiseException(BaseException): break if break_handler is not None and tried_statement.mayBreak(): break if continue_handler is not None and tried_statement.mayContinue(): break if return_handler is not None and tried_statement.mayReturn(): break post_statements.insert(0, tried_statement) tried_statements = list(tried_statements) del tried_statements[-1] if pre_statements or post_statements: assert tried_statements # Should be dealt with already tried.setChild("statements", tried_statements) result = StatementsSequence( statements=pre_statements + [self] + post_statements, source_ref=self.source_ref, ) def explain(): # TODO: We probably don't want to say this for re-formulation ones. result = "Reduced scope of tried block." if pre_statements: result += " Leading statements at %s." % ( ",".join( x.getSourceReference().getAsString() + "/" + str(x) for x in pre_statements ) ) if post_statements: result += " Trailing statements at %s." % ( ",".join( x.getSourceReference().getAsString() + "/" + str(x) for x in post_statements ) ) return result return (result, "new_statements", explain) return self, None, None def mayReturn(self): # TODO: If we optimized return handler away, this would be not needed # or even non-optimal. 
if self.subnode_tried.mayReturn(): return True if self.tried_may_raise is not False: except_handler = self.subnode_except_handler if except_handler is not None and except_handler.mayReturn(): return True break_handler = self.subnode_break_handler if break_handler is not None and break_handler.mayReturn(): return True continue_handler = self.subnode_continue_handler if continue_handler is not None and continue_handler.mayReturn(): return True return_handler = self.subnode_return_handler if return_handler is not None and return_handler.mayReturn(): return True return False def mayBreak(self): # TODO: If we optimized return handler away, this would be not needed # or even non-optimal. if self.subnode_tried.mayBreak(): return True if self.tried_may_raise is not False: except_handler = self.subnode_except_handler if except_handler is not None and except_handler.mayBreak(): return True break_handler = self.subnode_break_handler if break_handler is not None and break_handler.mayBreak(): return True continue_handler = self.subnode_continue_handler if continue_handler is not None and continue_handler.mayBreak(): return True return_handler = self.subnode_return_handler if return_handler is not None and return_handler.mayBreak(): return True return False def mayContinue(self): # TODO: If we optimized return handler away, this would be not needed # or even non-optimal. 
if self.subnode_tried.mayContinue(): return True if self.tried_may_raise is not False: except_handler = self.subnode_except_handler if except_handler is not None and except_handler.mayContinue(): return True break_handler = self.subnode_break_handler if break_handler is not None and break_handler.mayContinue(): return True continue_handler = self.subnode_continue_handler if continue_handler is not None and continue_handler.mayContinue(): return True return_handler = self.subnode_return_handler if return_handler is not None and return_handler.mayContinue(): return True return False def isStatementAborting(self): if self.tried_may_raise is not False: except_handler = self.subnode_except_handler if except_handler is None or not except_handler.isStatementAborting(): return False break_handler = self.subnode_break_handler if break_handler is not None and not break_handler.isStatementAborting(): return False continue_handler = self.subnode_continue_handler if continue_handler is not None and not continue_handler.isStatementAborting(): return False return_handler = self.subnode_return_handler if return_handler is not None and not return_handler.isStatementAborting(): return False return self.subnode_tried.isStatementAborting() def mayRaiseException(self, exception_type): if self.tried_may_raise is not False: except_handler = self.subnode_except_handler if except_handler is not None and except_handler.mayRaiseException( exception_type ): return True break_handler
+ I11i - OOooOOo i1IIIIi1Ii111 = lisp_rloc ( ) i1IIIIi1Ii111 . store_rloc_from_record ( i1iIiII , None , Iii1 . mapping_source ) if ( oO000O0oooOo != None ) : i1IIIIi1Ii111 . stats = copy . deepcopy ( oO000O0oooOo ) if 20 - 20: OoO0O00 . OoooooooOO - I1Ii111 * IiII if ( I11iI1iIi1i and i1IIIIi1Ii111 . is_rtr ( ) == False ) : continue if 20 - 20: o0oOOo0O0Ooo . OoooooooOO * I1IiiI . Oo0Ooo * OoOoOO00 Iii1 . rloc_set = [ i1IIIIi1Ii111 ] Iii1 . build_best_rloc_set ( ) lisp_write_ipc_map_cache ( True , Iii1 ) if 3 - 3: I1Ii111 % i11iIiiIii % O0 % II111iiii lprint ( "Update {} map-cache entry with RLE {}" . format ( green ( Iii1 . print_eid_tuple ( ) , False ) , i1IIIIi1Ii111 . rle . print_rle ( False ) ) ) if 8 - 8: OoooooooOO * ooOoO0o if 26 - 26: i11iIiiIii + oO0o - i1IIi if 71 - 71: I1IiiI % I1Ii111 / oO0o % oO0o / iIii1I11I1II1 + I1Ii111 return if 86 - 86: IiII % i1IIi * o0oOOo0O0Ooo - I1Ii111 if 37 - 37: iII111i % I1IiiI - I1ii11iIi11i % I11i if 35 - 35: O0 - OoooooooOO % iII111i if 48 - 48: OOooOOo % i11iIiiIii if 49 - 49: O0 * iII111i + II111iiii - OOooOOo if 29 - 29: OoooooooOO % II111iiii - Oo0Ooo / IiII - i11iIiiIii if 64 - 64: iII111i . I1Ii111 + I1Ii111 if 1 - 1: OOooOOo % Oo0Ooo def lisp_process_map_notify ( lisp_sockets , orig_packet , source ) : o0oo0 = lisp_map_notify ( "" ) iI1IIII1ii1 = o0oo0 . decode ( orig_packet ) if ( iI1IIII1ii1 == None ) : lprint ( "Could not decode Map-Notify packet" ) return if 81 - 81: oO0o / I11i % Ii1I . I11i + OoooooooOO if 31 - 31: OoO0O00 o0oo0 . print_notify ( ) if 41 - 41: i11iIiiIii - I1ii11iIi11i - II111iiii if 5 - 5: OoOoOO00 + i1IIi if 43 - 43: iII111i * I1IiiI if 20 - 20: I1IiiI . I11i * OoO0O00 . ooOoO0o . II111iiii if 6 - 6: Ii1I * OoOoOO00 % IiII + I11i i1I1iIi1IiI = source . print_address ( ) if ( o0oo0 . alg_id != 0 or o0oo0 . auth_len != 0 ) : ii1I111i = None for i1IIiI1iII in lisp_map_servers_list : if ( i1IIiI1iII . 
find ( i1I1iIi1IiI ) == - 1 ) : continue ii1I111i = lisp_map_servers_list [ i1IIiI1iII ] if 20 - 20: oO0o if ( ii1I111i == None ) : lprint ( ( " Could not find Map-Server {} to authenticate " + "Map-Notify" ) . format ( i1I1iIi1IiI ) ) if 34 - 34: i1IIi + oO0o * Oo0Ooo * I1Ii111 % OoooooooOO % ooOoO0o return if 17 - 17: I1ii11iIi11i + o0oOOo0O0Ooo / OoO0O00 . Oo0Ooo - o0oOOo0O0Ooo / oO0o if 87 - 87: ooOoO0o ii1I111i . map_notifies_received += 1 if 74 - 74: i11iIiiIii . i11iIiiIii . iIii1I11I1II1 i111II = lisp_verify_auth ( iI1IIII1ii1 , o0oo0 . alg_id , o0oo0 . auth_data , ii1I111i . password ) if 100 - 100: i11iIiiIii - oO0o + iIii1I11I1II1 * OoOoOO00 % OOooOOo % i11iIiiIii lprint ( " Authentication {} for Map-Notify" . format ( "succeeded" if i111II else "failed" ) ) if 26 - 26: O0 if ( i111II == False ) : return else : ii1I111i = lisp_ms ( i1I1iIi1IiI , None , "" , 0 , "" , False , False , False , False , 0 , 0 , 0 , None ) if 97 - 97: OOooOOo + I11i % I1Ii111 % i11iIiiIii / I1ii11iIi11i if 21 - 21: O0 + iIii1I11I1II1 / i11iIiiIii . OOooOOo * i1IIi if 3 - 3: i1IIi % o0oOOo0O0Ooo + OoOoOO00 if 32 - 32: OoO0O00 . Oo0Ooo * iIii1I11I1II1 if 12 - 12: O0 + I1ii11iIi11i + I11i . I1Ii111 if 48 - 48: Ii1I . iIii1I11I1II1 - iIii1I11I1II1 * I11i . OoooooooOO O00OO0OO = o0oo0 . eid_records if ( o0oo0 . record_count == 0 ) : lisp_send_map_notify_ack ( lisp_sockets , O00OO0OO , o0oo0 , ii1I111i ) return if 73 - 73: Ii1I / II111iiii - iIii1I11I1II1 . ooOoO0o * II111iiii . OOooOOo if 50 - 50: iIii1I11I1II1 + OoOoOO00 % O0 + OoO0O00 . i11iIiiIii / oO0o if 31 - 31: I1IiiI % o0oOOo0O0Ooo . i11iIiiIii % OOooOOo - iIii1I11I1II1 if 77 - 77: i11iIiiIii / OOooOOo if 93 - 93: I1ii11iIi11i - iII111i % O0 - Ii1I if 84 - 84: I1ii11iIi11i . iIii1I11I1II1 % IiII * I11i + ooOoO0o if 59 - 59: oO0o * OoO0O00 - I11i * I1IiiI if 60 - 60: iII111i - OoooooooOO / iII111i % OoO0O00 . OoOoOO00 - o0oOOo0O0Ooo I111IoOo0oOOO0o = lisp_eid_record ( ) iI1IIII1ii1 = I111IoOo0oOOO0o . 
decode ( O00OO0OO ) if ( iI1IIII1ii1 == None ) : return if 71 - 71: iII111i * o0oOOo0O0Ooo * i11iIiiIii * O0 I111IoOo0oOOO0o . print_record ( " " , False ) if 77 - 77: OOooOOo % iII111i + I11i / OoOoOO00 for Ii1i1Ii in range ( I111IoOo0oOOO0o . rloc_count ) : i1iIiII = lisp_rloc_record ( ) iI1IIII1ii1 = i1iIiII . decode ( iI1IIII1ii1 , None ) if ( iI1IIII1ii1 == None ) : lprint ( " Could not decode RLOC-record in Map-Notify packet" ) return if 50 - 50: OoOoOO00 - i11iIiiIii - OOooOOo . iIii1I11I1II1 i1iIiII . print_record ( " " ) if 97 - 97: oO0o % OOooOOo . OoooooooOO * Ii1I if 100 - 100: I1ii11iIi11i / Ii1I % Oo0Ooo if 83 - 83: O0 . I1Ii111 % I1ii11iIi11i if 97 - 97: Oo0Ooo % OoO0O00 * I1ii11iIi11i * ooOoO0o * OoO0O00 if 12 - 12: ooOoO0o if ( I111IoOo0oOOO0o . group . is_null ( ) == False ) : if 56 - 56: i1IIi if 3 - 3: OOooOOo - Oo0Ooo * Ii1I + i11iIiiIii if 53 - 53: i1IIi % I1ii11iIi11i if 65 - 65: I11i + OoOoOO00 - i11iIiiIii if 72 - 72: i11iIiiIii - iII111i . i11iIiiIii lprint ( "Send {} Map-Notify IPC message to ITR process" . format ( green ( I111IoOo0oOOO0o . print_eid_tuple ( ) , False ) ) ) if 61 - 61: oO0o . i11iIiiIii / Ii1I % iII111i if 36 - 36: OoO0O00 + Ii1I / I11i - iII111i % OoO0O00 / Oo0Ooo Oooo000 = lisp_control_packet_ipc ( orig_packet , i1I1iIi1IiI , "lisp-itr" , 0 ) lisp_ipc ( Oooo000 , lisp_sockets [ 2 ] , "lisp-core-pkt" ) if 38 - 38: Ii1I - ooOoO0o - O0 + oO0o . iIii1I11I1II1 if 90 - 90: i1IIi * OoOoOO00 if 27 - 27: iIii1I11I1II1 if 95 - 95: iII111i / ooOoO0o % Ii1I if 44 - 44: OOooOOo . OOooOOo lisp_send_map_notify_ack ( lisp_sockets , O00OO0OO , o0oo0 , ii1I111i ) return if 5 - 5: oO0o + OoooooooOO if 88 - 88: oO0o + OOooOOo if 14 - 14: I11i / i1IIi if 56 - 56: OoooooooOO if 59 - 59: I1ii11iIi11i + OoO0O00 if 37 - 37: IiII * I1IiiI % O0 if 32 - 32: ooOoO0o % II111iiii if 60 - 60: i11iIiiIii def lisp_process_map_notify_ack ( packet , source ) : o0oo0 = lisp_map_notify ( "" ) packet = o0oo0 . 
decode ( packet ) if ( packet == None ) : lprint ( "Could not decode Map-Notify-Ack packet" ) return if 11 - 11: o0oOOo0O0Ooo if 77 - 77: o0oOOo0O0Ooo / iIii1I11I1II1 * iIii1I11I1II1 / o0oOOo0O0Ooo * iII111i o0oo0 . print_notify ( ) if 26 - 26: Ii1I if 1 - 1: OoOoOO00 . o0oOOo0O0Ooo + Oo0Ooo % Oo0Ooo * I1ii11iIi11i if 50 - 50: IiII / i1IIi . I1ii11iIi11i if 75 - 75: I11i * oO0o + OoooooooOO . iII111i + OoO0O00 if 44 - 44: II111iiii if (
to the provider's website. Use text and URL to create the hyperlink.
    :type url: str
    :ivar optional_for_list_display: Indicates whether this provider's
     attribution is optional.
    :vartype optional_for_list_display: bool
    """

    # NOTE(review): fragment — the `class ContractualRulesLinkAttribution`
    # statement and the head of its docstring are outside this chunk.
    _validation = {
        'target_property_name': {'readonly': True},
        '_type': {'required': True},
        'must_be_close_to_content': {'readonly': True},
        'text': {'required': True},
        'url': {'required': True},
        'optional_for_list_display': {'readonly': True},
    }

    _attribute_map = {
        'target_property_name': {'key': 'targetPropertyName', 'type': 'str'},
        '_type': {'key': '_type', 'type': 'str'},
        'must_be_close_to_content': {'key': 'mustBeCloseToContent', 'type': 'bool'},
        'text': {'key': 'text', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'optional_for_list_display': {'key': 'optionalForListDisplay', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ContractualRulesLinkAttribution, self).__init__(**kwargs)
        self.text = kwargs.get('text', None)
        self.url = kwargs.get('url', None)
        # Server-populated (readonly) attribute; initialised to None.
        self.optional_for_list_display = None
        # Polymorphic discriminator constant used by the serializer.
        self._type = 'ContractualRules/LinkAttribution'


class ContractualRulesMediaAttribution(ContractualRulesAttribution):
    """Defines a contractual rule for media attribution.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar target_property_name: The name of the field that the rule applies
     to.
    :vartype target_property_name: str
    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar must_be_close_to_content: A Boolean value that determines whether
     the contents of the rule must be placed in close proximity to the field
     that the rule applies to. If true, the contents must be placed in close
     proximity. If false, or this field does not exist, the contents may be
     placed at the caller's discretion.
    :vartype must_be_close_to_content: bool
    :ivar url: The URL that you use to create a hyperlink of the media
     content. For example, if the target is an image, you would use the URL
     to make the image clickable.
    :vartype url: str
    """

    _validation = {
        'target_property_name': {'readonly': True},
        '_type': {'required': True},
        'must_be_close_to_content': {'readonly': True},
        'url': {'readonly': True},
    }

    _attribute_map = {
        'target_property_name': {'key': 'targetPropertyName', 'type': 'str'},
        '_type': {'key': '_type', 'type': 'str'},
        'must_be_close_to_content': {'key': 'mustBeCloseToContent', 'type': 'bool'},
        'url': {'key': 'url', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ContractualRulesMediaAttribution, self).__init__(**kwargs)
        # Readonly, server-populated.
        self.url = None
        self._type = 'ContractualRules/MediaAttribution'


class ContractualRulesTextAttribution(ContractualRulesAttribution):
    """Defines a contractual rule for text attribution.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar target_property_name: The name of the field that the rule applies
     to.
    :vartype target_property_name: str
    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar must_be_close_to_content: A Boolean value that determines whether
     the contents of the rule must be placed in close proximity to the field
     that the rule applies to. If true, the contents must be placed in close
     proximity. If false, or this field does not exist, the contents may be
     placed at the caller's discretion.
    :vartype must_be_close_to_content: bool
    :param text: Required. The attribution text. Text attribution applies to
     the entity as a whole and should be displayed immediately following the
     entity presentation. If there are multiple text or link attribution
     rules that do not specify a target, you should concatenate them and
     display them using a "Data from:" label.
    :type text: str
    :ivar optional_for_list_display: Indicates whether this provider's
     attribution is optional.
    :vartype optional_for_list_display: bool
    """

    _validation = {
        'target_property_name': {'readonly': True},
        '_type': {'required': True},
        'must_be_close_to_content': {'readonly': True},
        'text': {'required': True},
        'optional_for_list_display': {'readonly': True},
    }

    _attribute_map = {
        'target_property_name': {'key': 'targetPropertyName', 'type': 'str'},
        '_type': {'key': '_type', 'type': 'str'},
        'must_be_close_to_content': {'key': 'mustBeCloseToContent', 'type': 'bool'},
        'text': {'key': 'text', 'type': 'str'},
        'optional_for_list_display': {'key': 'optionalForListDisplay', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ContractualRulesTextAttribution, self).__init__(**kwargs)
        self.text = kwargs.get('text', None)
        self.optional_for_list_display = None
        self._type = 'ContractualRules/TextAttribution'


class CreativeWork(Thing):
    """CreativeWork.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: MediaObject, License

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar contractual_rules: A list of rules that you must adhere to if you
     display the item.
    :vartype contractual_rules:
     list[~azure.cognitiveservices.search.entitysearch.models.ContractualRulesContractualRule]
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar name: The name of the thing represented by this object.
    :vartype name: str
    :ivar url: The URL to get more information about the thing represented
     by this object.
    :vartype url: str
    :ivar image:
    :vartype image:
     ~azure.cognitiveservices.search.entitysearch.models.ImageObject
    :ivar description: A short description of the item.
    :vartype description: str
    :ivar entity_presentation_info: Additional information about the entity
     such as hints that you can use to determine the entity's type. To
     determine the entity's type, use the entityScenario and entityTypeHint
     fields.
    :vartype entity_presentation_info:
     ~azure.cognitiveservices.search.entitysearch.models.EntitiesEntityPresentationInfo
    :ivar bing_id: An ID that uniquely identifies this item.
    :vartype bing_id: str
    :ivar thumbnail_url: The URL to a thumbnail of the item.
    :vartype thumbnail_url: str
    :ivar provider: The source of the creative work.
    :vartype provider:
     list[~azure.cognitiveservices.search.entitysearch.models.Thing]
    :ivar text:
    :vartype text: str
    """

    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'contractual_rules': {'readonly': True},
        'web_search_url': {'readonly': True},
        'name': {'readonly': True},
        'url': {'readonly': True},
        'image': {'readonly': True},
        'description': {'readonly': True},
        'entity_presentation_info': {'readonly': True},
        'bing_id': {'readonly': True},
        'thumbnail_url': {'readonly': True},
        'provider': {'readonly': True},
        'text': {'readonly': True},
    }

    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'contractual_rules': {'key': 'contractualRules', 'type': '[ContractualRulesContractualRule]'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'image': {'key': 'image', 'type': 'ImageObject'},
        'description': {'key': 'description', 'type': 'str'},
        'entity_presentation_info': {'key': 'entityPresentationInfo', 'type': 'EntitiesEntityPresentationInfo'},
        'bing_id': {'key': 'bingId', 'type': 'str'},
        'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
        'provider': {'key': 'provider', 'type': '[Thing]'},
        'text': {'key': 'text', 'type': 'str'},
    }

    # Maps the '_type' discriminator value to the sub-class used for
    # polymorphic deserialization.
    _subtype_map = {
        '_type': {'MediaObject': 'MediaObject', 'License': 'License'}
    }

    def __init__(self, **kwargs):
        super(CreativeWork, self).__init__(**kwargs)
        self.thumbnail_url = None
        self.provider = None
        self.text = None
        self._type = 'CreativeWork'


class LocalBusiness(Place):
    """LocalBusiness.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: EntertainmentBusiness, FoodEstablishment,
    LodgingBusiness

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar contractual_rules: A list of rules that you must adhere to if you
     display the item.
    :vartype contractual_rules:
     list[~azure.cognitiveservices.search.entitysearch.models.ContractualRulesContractualRule]
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar name: The name of the thing represented by this object.
    :vartype name: str
    :ivar url: The URL to get more information about the thing represented
     by this object.
    :vartype url: str
    :ivar image:
    :vartype image:
     ~azure.cognitiveservices.search.entitysearch.models.ImageObject
    :ivar description: A short description of the item.
    :vartype description: str
    :ivar entity_presentation_info: Additional information about the entity
     such as hints that you can use to determine the entity's type. To
     determine the entity's type, use the entityScenario and entityTypeHint
     fields.
    :vartype entity_presentation_info:
     ~azure.cognitiveservices.search.entitysearch.models.EntitiesEntityPresentationInfo
    :ivar bing_id: An ID that uniquely identifies this item.
    :vartype bing_id: str
    :ivar address: The postal address of where the entity is located
    :vartype address:
     ~azure.cognitiveservices.search.entitysearch.models.PostalAddress
    :ivar telephone: The entity's telephone number
    :vartype telephone: str
    :ivar price_range: $$.
    :vartype price_range: str
    :ivar panoramas:
    :vartype panoramas:
     list[~azure.cognitiveservices.search.entitysearch.models.ImageObject]
    :ivar is_permanently_closed:
    :vartype is_permanently_closed: bool
    :ivar tag_line:
    :vartype tag_line: str
    """

    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'contractual_rules': {'readonly': True},
        'web_search_url': {'readonly': True},
        'name': {'readonly': True},
        'url': {'readonly': True},
        'image': {'readonly': True},
        'description': {'readonly': True},
        'entity_presentation_info': {'readonly': True},
        'bing_id': {'readonly': True},
        'address': {'readonly': True},
        'telephone': {'readonly': True},
        'price_range': {'readonly': True},
        'panoramas': {'readonly': True},
        'is_permanently_closed': {'readonly': True},
        'tag_line': {'readonly': True},
    }

    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'contractual_rules': {'key': 'contractualRules', 'type': '[ContractualRulesContractualRule]'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'image': {'key': 'image', 'type': 'ImageObject'},
        'description': {'key': 'description', 'type': 'str'},
        'entity_presentation_info': {'key': 'entityPresentationInfo', 'type': 'EntitiesEntityPresentationInfo'},
        'bing_id': {'key': 'bingId', 'type': 'str'},
        'address': {'key': 'address', 'type': 'PostalAddress'},
        'telephone': {'key': 'telephone', 'type': 'str'},
        'price_range': {'key': 'priceRange', 'type': 'str'},
        'panoramas': {'key': 'panoramas', 'type': '[ImageObject]'},
        'is_permanently_closed': {'key': 'isPermanentlyClosed', 'type': 'bool'},
        'tag_line':
"""AppDaemon App For use with Monitor Bluetooth Presence Detection Script. apps.yaml parameters: | - monitor_topic (default 'monitor'): MQTT Topic monitor.sh script publishes to | - mqtt_event (default 'MQTT_MESSAGE'): MQTT event name as specified in the plugin setting | - not_home_timeout (default 30s): Time interval before declaring not home | - minimum_confidence (default 50): Minimum Confidence Level to consider home | - depart_check_time (default 30s): Time to wait before running depart scan | - system_timeout (default 90s): Time for system to report back from echo | - system_check (default 30s): Time interval for checking if system is online | - everyone_not_home: Name to use for the "Everyone Not Home" Sensor | - everyone_home: Name to use for the "Everyone Home" Sensor | - somebody_is_home: Name to use for the "Somebody Is Home" Sensor | - user_device_domain: Use "binary_sensor" or "device_tracker" domains. | - known_devices: Known devices to be added to each monitor. | - known_beacons: Known Beacons to monitor. | - remote_monitors: login details of remote monitors that can be hardware rebooted """ import json import adbase as ad import copy from datetime import datetime, timedelta import traceback __VERSION__ = "2.3.4" # pylint: disable=attribute-defined-outside-init,unused-argument class HomePresenceApp(ad.ADBase): """Home Precence App Main Class.""" def initialize(self): """Initialize AppDaemon App.""" self.adbase = self.get_ad_api() self.hass = self.get_plugin_api("HASS") self.mqtt = self.get_plugin_api("MQTT") self.presence_topic = self.args.get("monitor_topic", "monitor") self.user_device_domain = self.args.get("user_device_domain", "binary_sensor") # State string to use depends on which domain is in use. self.state_true = "on" if self.user_device_domain == "binary_sensor" else "home" self.state_false = ( "off" if self.user_device_domain == "binary_sensor" else "not_home" ) # Setup dictionary of known beacons in the format { mac_id: name }. 
self.known_beacons = { p[0]: p[1].lower() for p in (b.split(" ", 1) for b in self.args.get("known_beacons", [])) } # Setup dictionary of known devices in the format { mac_id: name }. self.known_devices = { p[0]: p[1].lower() for p in (b.split(" ", 1) for b in self.args.get("known_devices", [])) } # Support nested presence topics (e.g. "hass/monitor") self.topic_level = len(self.presence_topic.split("/")) self.presence_name = self.presence_topic.split("/")[-1] self.timeout = self.args.get("not_home_timeout", 30) self.minimum_conf = self.args.get("minimum_confidence", 50) self.depart_check_time = self.args.get("depart_check_time", 30) self.system_timeout = self.args.get("system_timeout", 60) system_check = self.args.get("system_check", 30) self.all_users_sensors = [] self.not_home_timers = dict() self.location_timers = dict() self.confidence_handlers = dict() self.home_state_entities = dict() self.system_handle = dict() self.node_scheduled_reboot = dict() self.node_executing = dict() # Create a sensor to keep track of if the monitor is busy or not. self.monitor_entity = f"{self.presence_name}.monitor_state" self.mqtt.set_state( self.monitor_entity, state="idle", attributes={ "locations": [], "version": __VERSION__, "nodes": 0, "online_nodes": [], "offline_nodes": [], "friendly_name": "Monitor System State", }, replace=True, ) # Listen for requests to scan immediately. 
self.mqtt.listen_state(self.monitor_scan_now, self.monitor_entity, new="scan") # Listen for all changes to the monitor entity for MQTT forwarding self.mqtt.listen_state( self.forward_monitor_state, self.monitor_entity, attribute="all", immediate=True, ) self.monitor_handlers = {self.monitor_entity: None} # Setup the Everybody Home/Not Home Group Sensors self.setup_global_sensors() # Initialize our timer variables self.gateway_timer = None self.motion_timer = None self.check_home_timer = None # Setup home gateway sensors if self.args.get("home_gateway_sensors") is not None: for gateway_sensor in self.args["home_gateway_sensors"]: self.hass.listen_state(self.gateway_opened, gateway_sensor) else: # no gateway sensors, do app has to run arrive and depart scans every 2 minutes self.adbase.log( "No Gateway Sensors specified, Monitor-APP will run Arrive and Depart Scan every 2 minutes. Please specify Gateway Sensors for a better experience", level="WARNING", ) self.adbase.run_every( self.run_arrive_scan, self.adbase.datetime() + timedelta(seconds=1), 60 ) self.adbase.run_every( self.run_depart_scan, self.adbase.datetime() + timedelta(seconds=2), 60 ) # Setup home motion sensors, used for RSSI tracking for motion_sensor in self.args.get("home_motion_sensors", []): self.hass.listen_state(self.motion_detected, motion_sensor) if self.args.get("scheduled_restart") is not None: kwargs = {} if "time" in self.args["scheduled_restart"]: time = self.args["scheduled_restart"]["time"] if "days" in self.args["scheduled_restart"]: kwargs["constrain_days"] = ",".join( self.args["scheduled_restart"]["days"] ) if "location" in self.args["scheduled_restart"]: kwargs["location"] = self.args["scheduled_restart"]["location"] self.adbase.log("Setting up Monitor auto reboot") self.adbase.run_daily(self.restart_device, time, **kwargs) else: self.adbase.log( "Will not be setting up auto reboot, as no time specified", level="WARNING", ) # Setup the system checks. 
if self.system_timeout > system_check: topic = f"{self.presence_topic}/echo" self.adbase.run_every( self.send_mqtt_message, self.adbase.datetime() + timedelta(seconds=1), system_check, topic=topic, payload="", scan_type="System", ) else: self.adbase.log( "Cannot setup System Check due to System Timeout" " being Lower than System Check in Seconds", level="WARNING", ) # Setup primary MQTT Listener for all presence messages. self.mqtt.listen_event( self.presence_message, self.args.get("mqtt_event", "MQTT_MESSAGE"), wildcard=f"{self.presence_topic}/#", ) self.adbase.log(f"Listening on MQTT Topic {self.presence_topic}", level="DEBUG") # Listen for any HASS restarts self.hass.listen_event(self.hass_restarted, "plugin_restarted") # Load the devices from the config. self.adbase.run_in(self.clean_devices, 0) # clean old devices first self.setup_service() # setup service def setup_global_sensors(self): """Add all global home/not_home sensors.""" everyone_not_home = self.args.get("everyone_not_home", "everyone_not_home") self.everyone_not_home = f"binary_sensor.{everyone_not_home}" everyone_home = self.args.get("everyone_home", "everyone_home") self.everyone_home = f"binary_sensor.{everyone_home}" somebody_is_home = self.args.get("somebody_is_home", "somebody_is_home") self.somebody_is_home = f"binary_sensor.{somebody_is_home}" self.create_global_sensor(everyone_not_home) self.create_global_sensor(everyone_home) self.create_global_sensor(somebody_is_home) def create_global_sensor(self, sensor): """Create a global sensor in HASS if it does not exist.""" if self.hass.entity_exists(f"binary_sensor.{sensor}"): return self.adbase.log(f"Creating Binary Sensor for {sensor}", level="DEBUG") attributes = { "friendly_name": sensor.replace("_", " ").title(), "device_class": "presence", } self.hass.set_state( f"binary_sensor.{sensor}", state="off", attributes=attributes ) def presence_message(self, event_name, data, kwargs): """Process a message sent on the MQTT Topic.""" topic = 
data.get("topic") payload = data.get("payload") self.adbase.log(f"{topic} payload: {payload}", level="DEBUG") topic_path = topic.split("/") action = topic_path[-1].lower() # Process the payload as JSON if it is JSON payload_json = {} try: payload_json = json.loads(payload) except ValueError: pass # Handle request for immediate scan via MQTT # can be arrive/depart/rssi if action == "run_scan": # add scan_delay=0 to ensure its done immediately self.mqtt.call_service( f"{self.presence_topic}/run_{payload.lower()}_scan", scan_delay=0 ) return # Determine which scanner initiated the message location = "unknown" if isinstance(payload_json, dict) and "identity" in payload_json: location = payload_json.get("identity", "unknown") elif len(topic_path) > self.topic_level + 1: location = topic_path[self.topic_level] location = location.replace(" ", "_").lower() location_friendly = location.replace("_", " ").title() # Presence System is Restarting if action == "restart": self.adbase.log("The Entire Presence System is Restarting", level="INFO") return # Miscellaneous Actions, Discard if action in [ "depart", "arrive", "state", "known device states", "add static device", "delete static device", ]: return # Status Message from the Presence System if action == "status": self.handle_status(location=location, payload=payload.lower()) return if action in ["start", "end"]: self.handle_scanning( action=action, location=location, scan_type=topic_path[self.topic_level + 1], ) return # Response to Echo Check of Scanner if action == "echo": self.handle_echo(location=location, payload=payload) return # Handle request for reboot of hardware if action == "reboot": self.adbase.run_in(self.restart_device, 1, location=location) return device_name = topic_path[self.topic_level + 1] # Handle Beacon Topics in MAC or iBeacon ID formats and make friendly. 
if device_name in list(self.known_beacons.keys()): device_name = self.known_beacons[device_name] else: device_name = device_name.replace(":", "_").replace("-", "_") device_entity_id = f"{self.presence_name}_{device_name}" device_state_sensor = f"{self.user_device_domain}.{device_entity_id}" device_entity_prefix = f"{device_entity_id}_{location}" device_conf_sensor = f"sensor.{device_entity_prefix}_conf" device_local = f"{device_name}_{location}" appdaemon_entity = f"{self.presence_name}.{device_local}" friendly_name = device_name.strip().replace("_", " ").title() # RSSI Value for a Known Device: if action == "rssi": if topic == f"{self.presence_topic}/scan/rssi" or payload == "": return attributes = { "rssi": payload, "last_reported_by": location.replace("_", " ").title(), } self.adbase.log( f"Recieved an RSSI of {payload} for {device_name} from {location_friendly}", level="DEBUG", ) if ( self.hass.entity_exists(device_conf_sensor) and self.hass.get_state(device_state_sensor, copy=False) == self.state_true ): # unless it exists, and the device is home don't update RSSI self.mqtt.set_state(appdaemon_entity, attributes=attributes) self.update_hass_sensor(device_conf_sensor, new_attr={"rssi": payload}) self.update_nearest_monitor(device_name) return # Ignore invalid JSON responses if not payload_json: return # Ignore unknown/bad types and unknown beacons if payload_json.get("type") not in [ "KNOWN_MAC", "GENERIC_BEACON", ] and payload_json.get("id") not in list(self.known_beacons.keys()): self.adbase.log( f"Ignoring Beacon {payload_json.get('id')} because it is not in the known_beacons list.", level="DEBUG", ) return # Clean-up names now that we have proper JSON payload available. 
payload_json["friendly_name"] = f"{friendly_name} {location_friendly}" if "name" in payload_json: payload_json["name"] = payload_json["name"].strip().title() # Get the confidence value from the payload confidence = int(float(payload_json.get("confidence", "0"))) del payload_json["confidence"] state = self.state_true if confidence >= self.minimum_conf else self.state_false if not self.hass.entity_exists(device_conf_sensor): # Entity does not exist in HASS yet. self.adbase.log( "Creating sensor {!r} for Confidence".format(device_conf_sensor) ) self.hass.set_state( device_conf_sensor, state=confidence, attributes={ "friendly_name": f"{friendly_name} {location_friendly} Confidence", "unit_of_measurement": "%", }, ) if not self.hass.entity_exists(device_state_sensor): # Device Home Presence Sensor Doesn't Exist Yet in Hass so create it self.adbase.log( "Creating sensor {!r} for Home State".format(device_state_sensor), level="DEBUG", ) self.hass.set_state( device_state_sensor, state=state, attributes={ "friendly_name": f"{friendly_name} Home", "type": payload_json.get("type", "UNKNOWN_TYPE"), "device_class": "presence", }, ) if not self.mqtt.entity_exists(device_state_sensor): # Device Home Presence Sensor Doesn't Exist Yet in default so create it self.adbase.log( "Creating sensor {!r} for Home State".format(device_state_sensor), level="DEBUG", ) self.mqtt.set_state( device_state_sensor, state=state, attributes={ "friendly_name": f"{friendly_name} Home", "type": payload_json.get("type", "UNKNOWN_TYPE"), "device_class": "presence", }, ) if device_entity_id not in self.home_state_entities:
string
        # Otherwise return the original string.
        # If matchAreaLabels, the areaLabel of previous node must match
        # that of the current if we are to return an empty string.
        # This prevents phrases such as:
        #   Chance of rain and snow 20 percent windward rain and snow 40 percent leeward.
        #
        # Check sub-phrases
        # NOTE(review): fragment — the enclosing method's signature is not
        # visible in this chunk; `str`, `strName` and `matchAreaLabels` are
        # its parameters/locals.
        #print "Check Repeating", node.getAncestor('name'), str
        #print "  matchAreaLabels", matchAreaLabels
        prevNode = node.getPrev()
        if prevNode is not None:
            if matchAreaLabels and \
               prevNode.getAreaLabel() != node.getAreaLabel():
                return str
            prevStr = prevNode.get(strName)
            if prevStr is not None and str == prevStr:
                # Do not repeat previous str
                #print "return 1"
                return ""
        # Check degenerate conjunctive local effect
        # We are looking for these conditions:
        #  --This phrase has only one sub-phrase
        #  --The previous phrase has only one sub-phrase AND
        #    has the same name as the current phrase (e.g. popMax_phrase
        #  --The str for the sub-phrases are the same
        phrase = node.getParent()
        #tree.printNode(phrase.parent)
        if len(phrase.childList) == 1:
            prevPhrase = phrase.getPrev()
            if prevPhrase is not None:
                if matchAreaLabels and \
                   prevPhrase.getAreaLabel() != node.getAreaLabel():
                    return str
                if prevPhrase.get("name") == phrase.get("name"):
                    if len(prevPhrase.childList) == 1:
                        prevSubPhrase = prevPhrase.childList[0]
                        prevStr = prevSubPhrase.get(strName)
                        if prevSubPhrase.get('words') is None:
                            # Must wait for previous words to finish
                            return -1
                        if prevStr is not None and str == prevStr:
                            # Do not repeat previous str
                            #print "return 2"
                            return ""
        return str

    # Local Effects
    def checkLocalEffects(self, tree, node):
        # Look for local effects on this node; when found, replace the node
        # with conjunctive local-effect nodes built per edit-area "group".
        localEffectsList = self.getLocalEffectsList(tree, node)
        #print "   le list", localEffectsList
        if localEffectsList is None or len(localEffectsList) == 0:
            return self.DONE()
        childList = node.get("childList")
        if childList is None or len(childList) < 1:
            return self.DONE()
        if self.__dict__.get('_leDebug',0):
            print "\nChecking local effects for", node.get('name'), node.getAreaLabel()
            print "  node", node
            print "  parent", node.parent
            print "  disabled", node.get('disabledSubkeys'), node.getAncestor('disabledSubkeys')
            print "\ncomp phrases before:"
            self.printCompPhrases(tree, node)
        for localEffect in localEffectsList:
            # If ANY subPhrase has a local effect, create conjunctive local effect.
            # If ALL subPhrases have the same local effect "groups", use that grouping.
            # Otherwise, create a conjunctive phrase for each local effect area.
            flag = 0
            firstTime = 1
            sameGroups = 1
            for checkNode in childList:
                nodeFlag, nodeGroups = self.checkLocalEffect(tree, checkNode, localEffect)
                if nodeFlag:
                    flag = 1
                if firstTime:
                    groups = nodeGroups
                    firstTime = 0
                elif groups != nodeGroups:
                    # flag must be 1
                    sameGroups = 0
                    break
            if flag:
                # Create conjunctive local effect
                #print "Creating conjunctive local effect"
                if sameGroups == 0:
                    # Differing groupings: fall back to one group per area.
                    groups = []
                    leAreaList = self.getLeAreaList(tree, node, localEffect)
                    for leArea in leAreaList:
                        groups.append([leArea])
                nodeList = self.makeLocalEffectNodes(tree, node, localEffect, groups)
                # Applies only to the skyPopWx_phrase
                # Set up includeSky for new local effect nodes
                includeSky = self.getIncludeSky(tree, node)
                for newNode in nodeList:
                    newNode.set("includeSky", includeSky)
                    if self.__dict__.get('_leDebug',0):
                        print "newNode", newNode.get("name"), newNode.get("areaLabel")
                        print "   includeSky", includeSky, newNode
                node.replace(nodeList)
        if flag:
            # There is a local effect
            self.localEffect_hook(tree, node)
        if self.__dict__.get('_leDebug',0):
            print "\ncomp phrases after:",
            self.printCompPhrases(tree, node)
        return self.DONE()

    def checkLocalEffect(self, tree, node, localEffect):
        # Check each local effect area against all others for the given node.
        # Determine "groups" i.e. group the local effect areas according to
        # similar statistics.
        # Return
        #  -- a flag to indicate if any local effect areas showed differing
        #     statistics.
        #  -- the "groups"
        triggerMethod = localEffect.triggerMethod
        leAreaList = self.getLeAreaList(tree, node, localEffect)
        if len(leAreaList) == 0:
            return 0, []
        # Begin with one group consisting of first local effect edit area
        groups = [[leAreaList[0]]]
        # This loop checks each subsequent local effect edit area against
        # the existing groups and appends it to the first group which
        # has similar statistics.
        # If no existing group has similar statistics, a new group is
        # created.
        for leArea1 in leAreaList[1:]:
            addedToExisting = 0
            for group in groups:
                leArea2 = group[0]
                difference = self.checkThreshold(
                    tree, node, triggerMethod, leArea1, leArea2, localEffect)
                if difference == 0:
                    # Similar statistics, so
                    # append it to the current group
                    group.append(leArea1)
                    addedToExisting = 1
                    break
            if addedToExisting == 0:
                # Did not find similar group, so create a new group
                groups.append([leArea1])
        if len(groups) == 1:
            flag = 0
        else:
            flag = 1
        return flag, groups

    def getLocalEffectsList(self, tree, node):
        # Return the node's localEffectsList, invoking it if it is a method.
        leList = node.get("localEffectsList")
        if type(leList) is types.MethodType:
            return leList(tree, node)
        else:
            return leList

    def getLeAreaList(self, tree, node, localEffect):
        # Return the localEffect's edit-area list, invoking it if a method.
        leAreaList = localEffect.leAreaList
        if type(leAreaList) is types.MethodType:
            return leAreaList(tree, node)
        else:
            return leAreaList

    def getLeAreaLabel(self, tree, node, leArea):
        # Resolve a local-effect area to a concrete area label.
        if leArea.areaLabel == "__Current__":
            return node.getAreaLabel()
        elif leArea.intersectFlag:
            return self.getIntersectName(node.getAreaLabel(), leArea.areaLabel)
            #return self.getIntersectName(tree.getAreaLabel(), leArea.areaLabel)
        else:
            return leArea.areaLabel

    def getLeQualifiers(self, tree, node, group):
        # Return the qualifiers for this group of leAreas
        # There is a qualifer for embedded local effect phrases
        # and one for conjunctive local effect phrases.
        embeddedQualifier = ""
        conjQualifier = ""
        length = len(group)
        index = 0
        for leArea in group:
            areaWords = leArea.areaWords
            if type(areaWords) is types.MethodType:
                areaWords = areaWords(tree, node, leArea)
            embeddedQualifier = embeddedQualifier + areaWords
            conjWords = leArea.conjAreaWords
            if type(conjWords) is types.MethodType:
                conjWords = conjWords(tree, node, leArea)
            conjQualifier = conjQualifier + conjWords
            # if last one, do not add conjunction
            if index == length - 1:
                break
            embeddedQualifier = embeddedQualifier + " and "
            conjQualifier = conjQualifier + " and "
            index = index + 1
        return embeddedQualifier, conjQualifier

    def checkThreshold(self, tree, node, triggerMethod, leArea1, leArea2, localEffect):
        # Return 1 if the difference between leArea1 and leArea2 stats is
        # greater than the threshold
        # Handles stats that are a min/max or a singleValue
        leArea1Label = self.getLeAreaLabel(tree, node, leArea1)
        leArea2Label = self.getLeAreaLabel(tree, node, leArea2)
        if type(triggerMethod) is types.MethodType:
            flag = triggerMethod(tree, node, localEffect, leArea1Label, leArea2Label)
        else:
            first = node.getAncestor("firstElement")
            element = first.name
            dataType = first.dataType
            # Weather is averaged; scalar/vector elements use min/max merging.
            if dataType == self.WEATHER():
                mergeMethod = "Average"
            else:
                mergeMethod = "MinMax"
            timeRange = node.getTimeRange()
            area1Stats = tree.stats.get(element, timeRange, leArea1Label,
                                        mergeMethod=mergeMethod)
            area2Stats = tree.stats.get(element, timeRange, leArea2Label,
                                        mergeMethod=mergeMethod)
            area1Stats = self.applyDisabled(tree, node, area1Stats)
            area2Stats = self.applyDisabled(tree, node, area2Stats)
            if self.__dict__.get("_leDebug", 0):
                print "\nCheckThreshold", element, timeRange
                print leArea1Label, area1Stats
                print leArea2Label, area2Stats
            if area1Stats is None or area2Stats is None:
                return 0
            flag = self.checkLocalEffectDifference(
                tree, node, dataType, triggerMethod, area1Stats, area2Stats,
                leArea1Label, leArea2Label)
        if self.__dict__.get("_leDebug", 0):
            print "returning", flag
        return flag

    def applyDisabled(self, tree, node, stats):
        # Filter any disabled weather subkeys out of "stats"; substitute an
        # empty weather key if everything was filtered out.
        if stats is None:
            return stats
        disabledSubkeys = node.getAncestor('disabledSubkeys')
        #print "/n applyDisabled: disabled", disabledSubkeys
        #print "stats", stats
        if disabledSubkeys is not None:
            newStats = []
            for subkey, rank in stats:
                if subkey not in disabledSubkeys:
                    newStats.append((subkey, rank))
            stats = newStats
            if stats == []:
                emptyKey = WeatherSubKey.weatherSubKey(self._argDict['site'],
                    "<NoCov>", "<NoWx>", "<NoInten>", "<NoVis>", [])
                stats = [(emptyKey, 100)]
        return stats

    def checkLocalEffectDifference(self, tree, node, dataType, threshold,
                                   area1Stats, area2Stats, al1, al2):
        # Return 1 if the two areas' stats differ enough to warrant a local
        # effect, else 0.  Discrete/weather use equality/similarity checks;
        # numeric stats compare against "threshold".
        if dataType == self.DISCRETE():
            if area1Stats != area2Stats:
                return 1
            else:
                return 0
        if dataType == self.WEATHER():
            flag = self.checkWeatherSimilarity(
                tree, node, area1Stats, area2Stats, al1=al1, al2=al2)
            # checkWeatherSimilarity returns 0 if there IS a difference and, thus,
            # should be a local effect
            if flag == 0:
                return 1
            else:
                return 0
        if dataType == self.VECTOR():
            # Drop the direction component; compare magnitudes only.
            area1Stats, dir = area1Stats
            area2Stats, dir = area2Stats
        if type(area1Stats) is types.TupleType:
            min1, max1 = area1Stats
            min2, max2 = area2Stats
            diff1 = self.absDiff(min1, min2)
            diff2 = self.absDiff(max1, max2)
            # Check to see if one range is included within the other
            if self.rangeIncluded(min1, max1, min2, max2) == 1:
                return 0
            if self.rangeIncluded(min2, max2, min1, max1) == 1:
                return 0
            # Check to see if either min or max is greater than threshold
            if diff1 > threshold or diff2 > threshold:
                return 1
            else:
                return 0
        else:
            absDiff = self.absDiff(area1Stats, area2Stats)
            if absDiff > threshold:
                return 1
            else:
                return 0

    def checkSkyWxDifference(self, tree, node, localEffect, leArea1Label, leArea2Label):
        # NOTE(review): truncated in this chunk — the body continues past
        # the visible text.
        timeRange = node.getTimeRange()
        wxStats1 = tree.stats.get("Wx", timeRange, leArea1Label, mergeMethod="Average")
        wxStats2 = tree.stats.get("Wx", timeRange, leArea2Label, mergeMethod="Average")
        wxStats1 = self.applyDisabled(tree, node, wxStats1)
        wxStats2 = self.applyDisabled(tree, node, wxStats2)
        #print "wxStats1", wxStats1
        #print "wxStats2", wxStats2
        wxSame = self.checkWeatherSimilarity(
            tree, node, wxStats1, wxStats2, al1=leArea1Label, al2=leArea2Label)
        #print "wxSame", wxSame
        if wxSame == 0:
            wxDiff = 1
        else:
            wxDiff = 0
        skyDiff = self.checkSkyDifference(tree,
self._stmt_delete_key = self._session.prepare(self.QUERY_DELETE_KEY.format(table_name)) self._stmt_delete_val = self._session.prepare(self.QUERY_DELETE_VAL.format(table_name)) @property def buffer_size(self): """ Get the buffer size. :rtype: int :return: the buffer size """ return self._buffer_size @buffer_size.setter def buffer_size(self, value): """ Set the buffer size and propagate it to the underlying client. :param int value: buffer size """ self._buffer_size = value @staticmethod def split_sequence(iterable, size): """ Generator to split an iterable in chunks of given size. :param iterable iterable: the iterable to split :param int size: the size of a chunk :rtype: generator[iterable] :return: a generator """ iterator = iter(iterable) item = list(itertools.islice(iterator, size)) while item: yield item item = list(itertools.islice(iterator, size)) def _select(self, statements_and_parameters): """ Execute a list of statements and parameters returning data. :param iterable[tuple] statements_and_parameters: list of statements and parameters :rtype: list[Row] :return: the rows matching the queries """ ret = [] size = self.CONCURRENCY for sub_sequence in CassandraClient.split_sequence(statements_and_parameters, size): results = c_concurrent.execute_concurrent( self._session, sub_sequence, concurrency=size, ) for result in results: success, rows = result if success: for row in rows: ret.append(row) else: raise RuntimeError return ret def _execute(self, statements_and_parameters): """ Execute a list of statements and parameters NOT returning data. :param iterable[tuple] statements_and_parameters: list of statements and parameters """ size = self.CONCURRENCY for sub_sequence in CassandraClient.split_sequence(statements_and_parameters, size): c_concurrent.execute_concurrent( self._session, sub_sequence, concurrency=size, ) def _buffer(self, statements_and_parameters): """ Buffer (and execute) statements and parameters NOT returning data. 
        :param iterable[tuple] statements_and_parameters: list of (statement, parameters) pairs
        """
        self._statements_and_parameters.extend(statements_and_parameters)
        # flush automatically once the buffer grows past the configured size
        if len(self._statements_and_parameters) >= self._buffer_size:
            self.empty_buffer()

    def empty_buffer(self):
        """
        Empty the buffer of statements and parameters, executing everything in it.
        """
        # copy the underlying list in a python2/3 compatible way
        buffer = list(self._statements_and_parameters)
        # delete the actual elements in a python2/3 compatible way
        del self._statements_and_parameters[:]
        self._execute(buffer)

    def insert(self, key, vals, buffer=False):
        """
        Insert an iterable of values with the same key.

        :param byte|str key: the key
        :param iterable[byte|str] vals: the iterable of values
        :param boolean buffer: whether the insert statements should be buffered
        """
        # one INSERT per value; the fresh timestamp keeps values ordered by insertion
        statements_and_parameters = [
            (self._stmt_insert, (self._key_encoder(key), self._val_encoder(val), self._ts()))
            for val in vals
        ]
        if buffer:
            self._buffer(statements_and_parameters)
        else:
            self._execute(statements_and_parameters)

    def upsert(self, key, vals, buffer=False):
        """
        Upsert an iterable of values with the same key.

        Note: this is used when treating a Cassandra partition as a set. Since we
        upsert data we never store duplicates. In this case the timestamp loses its
        meaning as we are not interested in sorting records anymore (it is a set
        after all) and we can safely overwrite every time we are storing a duplicate.

        :param byte|str key: the key
        :param iterable[byte|str] vals: the iterable of values
        :param boolean buffer: whether the upsert statements should be buffered
        """
        # NOTE(review): parameter order differs from insert() — timestamp first,
        # presumably matching the upsert query's placeholder order; confirm against
        # QUERY_UPSERT (defined above this chunk).
        statements_and_parameters = [
            (self._stmt_upsert, (self._ts(), self._key_encoder(key), self._val_encoder(val)))
            for val in vals
        ]
        if buffer:
            self._buffer(statements_and_parameters)
        else:
            self._execute(statements_and_parameters)

    def delete_keys(self, keys, buffer=False):
        """
        Delete a key (and all its values).
        :param iterable[byte|str] keys: the keys to delete
        :param boolean buffer: whether the delete statements should be buffered
        """
        statements_and_parameters = [
            (self._stmt_delete_key, (self._key_encoder(key), ))
            for key in keys
        ]
        if buffer:
            self._buffer(statements_and_parameters)
        else:
            self._execute(statements_and_parameters)

    def delete(self, key, val, buffer=False):
        """
        Delete a single value from a key.

        :param byte|str key: the key
        :param byte|str val: the value
        :param boolean buffer: whether the delete statement should be buffered
        """
        statements_and_parameters = [
            (self._stmt_delete_val, (self._key_encoder(key), self._val_encoder(val)))
        ]
        if buffer:
            self._buffer(statements_and_parameters)
        else:
            self._execute(statements_and_parameters)

    def get_keys(self):
        """
        Get all the keys.

        Note: selecting all keys in Cassandra via "SELECT DISTINCT key FROM table"
        is bound to time out since all nodes need to be contacted. To avoid this,
        we paginate through all keys using the TOKEN function. In this way we issue
        several different queries which alone can not time out.

        :rtype: set[byte|str]
        :return: the set of all keys
        """
        min_token = self.MIN_TOKEN
        keys = set([])
        while True:
            # one page of keys starting from the current token cursor
            rows = self._session.execute(self._stmt_get_keys, (min_token, self.PAGE_SIZE))
            if not rows:
                break
            for r in rows:
                keys.add(self._key_decoder(r.key))
                # advance the cursor past the last token seen so the next page
                # starts strictly after it
                min_token = r.f_token + 1
        return keys

    def select(self, keys):
        """
        Select all values for the given keys.

        :param iterable[byte|str] keys: the keys
        :rtype: dict[byte|str,list[byte|str]]
        :return: a dictionary of lists, each list sorted by insertion timestamp
        """
        statements_and_parameters = [
            (self._stmt_get, (self._key_encoder(key), ))
            for key in keys
        ]
        ret = collections.defaultdict(list)
        for row in self._select(statements_and_parameters):
            ret[self._key_decoder(row.key)].append((self._val_decoder(row.value), row.ts))
        # sort by timestamp, then strip the timestamps from the result
        return {
            k: [x[0] for x in sorted(v, key=operator.itemgetter(1))]
            for k, v in ret.items()
        }

    def select_count(self, keys):
        """
        Count the values for each of the provided keys.
        :param iterable[byte|str] keys: list of keys
        :rtype: dict[byte|str,int]
        :return: the number of values per key
        """
        statements_and_parameters = [
            (self._stmt_get_count, (self._key_encoder(key), ))
            for key in keys
        ]
        return {
            self._key_decoder(row.key): row.count
            for row in self._select(statements_and_parameters)
        }

    def one(self, key):
        """
        Select one single value of the given key.

        :param byte|str key: the key
        :rtype: byte|str|None
        :return: a single value for that key or None if the key does not exist
        """
        rows = self._session.execute(self._stmt_get_one, (self._key_encoder(key),))
        if rows:
            row = next(iter(rows))
            return self._val_decoder(row.value)
        return None


class CassandraStorage(object):
    """
    Storage implementation using Cassandra.

    Note: like other implementations, each storage has its own client.
    Unlike other implementations, all storage instances share one session
    and can potentially share the same buffer.
    """

    # default number of buffered statements before an automatic flush
    DEFAULT_BUFFER_SIZE = 5000

    def __init__(self, config, name=None, buffer_size=None):
        """
        Constructor.

        :param dict[str, any] config: configuration following the following format:
            {
                'basename': b'test',
                'type': 'cassandra',
                'cassandra': {
                    'seeds': ['127.0.0.1'],
                    'keyspace': 'lsh_test',
                    'replication': {
                        'class': 'SimpleStrategy',
                        'replication_factor': '1'
                    },
                    'drop_keyspace': True,
                    'drop_tables': True,
                    'shared_buffer': False,
                }
            }
        :param bytes name: the name
        :param int buffer_size: the buffer size
        """
        self._config = config
        if buffer_size is None:
            buffer_size = CassandraStorage.DEFAULT_BUFFER_SIZE
        # resolve any {'env': ...} indirections before handing the config to the client
        cassandra_param = self._parse_config(self._config['cassandra'])
        # fall back to a random name when none is supplied
        self._name = name if name else _random_name(11).decode('utf-8')
        self._buffer_size = buffer_size
        self._client = CassandraClient(cassandra_param, name, self._buffer_size)

    @staticmethod
    def _parse_config(config):
        """
        Parse a configuration dictionary, optionally fetching data from env variables.
        :param dict[str, any] config: the configuration
        :rtype: dict[str, str]
        :return: the parsed configuration
        """
        cfg = {}
        for key, value in config.items():
            if isinstance(value, dict):
                # a {'env': NAME, 'default': X} entry resolves to os.environ[NAME],
                # falling back to X (or None) when the variable is unset
                if 'env' in value:
                    value = os.getenv(value['env'], value.get('default', None))
            cfg[key] = value
        return cfg

    @property
    def buffer_size(self):
        """
        Get the buffer size.

        :rtype: int
        :return: the buffer size
        """
        return self._buffer_size

    @buffer_size.setter
    def buffer_size(self, value):
        """
        Set the buffer size and propagate it to the underlying client.

        :param int value: buffer size
        """
        self._buffer_size = value
        self._client.buffer_size = value

    def __getstate__(self):
        """
        Get a picklable state by removing unpicklable objects.

        :rtype: dict[str, any]
        :return: the state
        """
        state = self.__dict__.copy()
        # the Cassandra client (live session, prepared statements) cannot be pickled
        state.pop('_client')
        return state

    def __setstate__(self, state):
        """
        Set the state by reconnecting ephemeral objects.

        :param dict[str, any] state: the state to restore
        """
        self.__dict__ = state
        # re-run the constructor to rebuild the client dropped in __getstate__
        self.__init__(self._config, name=self._name, buffer_size=self._buffer_size)


class CassandraListStorage(OrderedStorage, CassandraStorage):
    """
    OrderedStorage storage implementation using Cassandra as backend.

    Note:
    Since we need to (i) select and delete values by both 'key' and by
    'key and value', and (ii) allow duplicate values, we store a
    monotonically increasing timestamp as additional value.
""" def keys(self): """Implement interface.""" return self._client.get_keys() def get(self, key): """Implement interface.""" return self._client.select([key]).get(key, []) def getmany(self, *keys): """Implement interface.""" return self._client.select(keys).values() def insert(self, key, *vals, **kwargs): """Implement interface.""" buffer = kwargs.pop('buffer', False) self._client.insert(key, vals, buffer) def remove(self, *keys, **kwargs): """Implement interface.""" buffer = kwargs.pop('buffer', False) self._client.delete_keys(keys, buffer) def remove_val(self, key, val, **kwargs): """Implement interface.""" buffer = kwargs.pop('buffer', False) self._client.delete(key, val, buffer) def size(self): """Implement interface.""" return len(self.keys()) def itemcounts(self): """Implement interface.""" return self._client.select_count(self.keys()) def has_key(self, key): """Implement interface.""" return self._client.one(key) is not None def empty_buffer(self): """Implement interface.""" self._client.empty_buffer() class CassandraSetStorage(UnorderedStorage, CassandraListStorage): """ OrderedStorage storage implementation using Cassandra as backend. Note: since we are interested in keeping duplicates or ordered data, we upsert the data ignoring what the timestamp actually means. """ def get(self, key): """Implement interface and override super-class.""" return set(super(CassandraSetStorage, self).get(key)) def insert(self, key, *vals, **kwargs): """Implement interface and override super-class.""" buffer = kwargs.pop('buffer', False) self._client.upsert(key, vals, buffer) if redis is not None: class RedisBuffer(redis.client.Pipeline): '''A bufferized version of `redis.pipeline.Pipeline`. The only difference from the conventional pipeline object is the ``_buffer_size``. Once the buffer is longer than the buffer size, the pipeline is automatically executed, and the buffer cleared. ''' def __init__(self, connection_pool, response_callbacks, transaction, buffer_size,
#!/usr/bin/env python

import argparse
import numpy as np
import tqdm
import sys
from LLC_Membranes.llclib import timeseries, fitting_functions, stats, rand
from LLC_Membranes.analysis import Poly_fit
import matplotlib.pyplot as plt
from multiprocessing import Pool
import time as timer
import fbm
from LLC_Membranes.timeseries.fractional_levy_motion import FLM
from LLC_Membranes.timeseries import flm_sim_params


def initialize():
    """Build and return the command-line argument parser for the CTRW/MSD simulation.

    :return: the configured :class:`argparse.ArgumentParser` (not yet parsed)
    """
    parser = argparse.ArgumentParser(description='Simulate fractional brownian motion and calculate its MSD')

    # NOTE(review): several help strings below rely on implicit adjacent-literal
    # concatenation and are missing a separating space (e.g. "hop length" +
    # "distribution" renders as "hop lengthdistribution"); kept verbatim here.
    parser.add_argument('-hop', '--hop_length_distribution', default='gaussian', help='Functional form of hop length'
                        'distribution')
    # if more distributions are included, this will need to be more complicated depending what parameters are needed
    parser.add_argument('-hs', '--hop_sigma', default=1, type=float, help='Standard deviation of gaussian distribution '
                        'used for drawing hop lengths')
    parser.add_argument('-H', '--hurst', default=0.5, type=float, help='Hurst parameter, used if hops are modeled with '
                        'fractional brownian motion')
    parser.add_argument('-n', '--noise', default=0, type=float, help='Magnitude of gaussian noise to add to generated'
                        'time series')
    # NOTE(review): help text below has an unclosed parenthesis ("(options: power, exponential")
    parser.add_argument('-dwell', '--dwell_time_distribution', default='power', help='Functional form of dwell time'
                        'distribution (options: power, exponential')
    parser.add_argument('-steps', '--steps', default=1000, type=int, help='Number of steps to take for each'
                        'independent trajectory')
    parser.add_argument('-ntraj', '--ntraj', default=100, type=int, help='Number of independent ctrw trajectories.')
    parser.add_argument('-ensemble', '--ensemble', action="store_true", help='Calculate MSD as ensemble average')
    parser.add_argument('-power_law', '--fit_power_law', action="store_true", help='Fit MSD to a power law')
    parser.add_argument('-linear', '--fit_line', action="store_true", help='Fit a line to the MSD')
    parser.add_argument('-alpha', '--alpha', default=0.5, type=float,
                        help='Anomalous exponent')
    parser.add_argument('-lamb', '--lamb', default=0.5, type=float, help='Exponential decay rate')
    parser.add_argument('-dt', '--dt', default=1, type=float, help='Discrete time step for fixed length simulations')
    parser.add_argument('-fix_time', '--fix_time', action="store_true", help='Fix the total time of simulated '
                        'trajectories. Total length will be steps*dt')
    # NOTE(review): --nboot has no type=int, so it parses as str — TODO confirm downstream usage
    parser.add_argument('-b', '--nboot', default=200, help='Number of bootstrap trials')
    parser.add_argument('-ul', '--upper_limit', default=None, type=int, help='Upper limit on dwell-time length')
    parser.add_argument('-acf', '--autocorrelation', action="store_true", help='Plot step autocorrelation function')

    # parallelization
    parser.add_argument('-nt', '--nthreads', default=0, type=int, help='Number of threads to use for parallelized '
                        'portions of the code.')

    return parser


class CTRW(object):

    def __init__(self, length, ntraj, nmodes=1, hop_dist='gaussian', dwell_dist='power', hop_sigma=1, alpha=0.5,
                 lamb=0.5, padding=10, dt=1, H=0.5, transition_count_matrix=None):
        """ Initialize simulation of a continuous time random walk

        :param length: length of each simulated trajectory. If you fix the number of steps, this equals the number of \
        steps. If you fix the time, the total length of the simulation is length * dt. If you fix the displacement, \
        this is the absolute distance a particle must travel before the trajectory is discontinued.
        :param ntraj: number of independent trajectories to generate
        :param nmodes: number of modes. There should be sigma, alpha and H parameters for each mode
        :param hop_dist: Method used to generate random hop lengths. "gaussian" draws randomly from a gaussian \
        distribution, while "fbm" generates correlated hops as in fractional brownian motion.
        :param dwell_dist: Name of probability distribution function used to generate random dwell times. If None, \
        dwell times will all be 1 time step.
:param hop_sigma: Sigma for Gaussian hop_dist random draws :param alpha: Anomalous exponent for power law random draws :param lamb: rate of decay for exponential random draws :param padding: multiplies number of discrete time points used to interpolate trajectories :param dt: time step for fixed time simulations :param nt: number of threads to use where parallelized :param H: Hurst parameter for fractional brownian motion. Equals 2*alpha for pure FBM. For H < 0.5, \ trajectories are subdiffusive, when H = 0.5, brownian motion is recovered. :param transition_count_matrix: nmode x nmode matrix of counts of transitions between states. Only needed if \ nmodes > 1. :type length: int or float :type ntraj: int :type nmodes: int :type hop_dist: str :type dwell_dist: str or NoneType :type hop_sigma: float :type alpha: float :type lamb: float :type padding: int :type dt: float :type nt: int :type H: float :type transition_count_matrix: np.ndarray """ self.length = length self.ntraj = ntraj self.hop_distribution = hop_dist self.hop_sigma = hop_sigma self.dwell_distribution = dwell_dist self.padding = padding self.alpha = alpha self.lamb = lamb self.dt = dt self.H = H self.nmodes = nmodes if self.hop_distribution in ['flm', 'fractional_levy_motion']: self.hurst_correction = flm_sim_params.HurstCorrection() self.truncation_correction = flm_sim_params.TruncateLevy() if self.nmodes > 1: if transition_count_matrix is not None: self.transition_count_matrix = transition_count_matrix else: sys.exit("You must provide a transition count matrix if nmodes is greater than 1") self.trajectories = np.zeros([self.ntraj, self.length, 2]) self.trajectory_hops = np.zeros([self.ntraj, 2 * self.length - 1, 2]) # for visualization self.time = None self.time_uniform = None self.z_interpolated = np.zeros([self.ntraj, self.length*self.padding]) # separate from time_uniform to save memory self.msd = None self.fit_parameters = None self.bootstraps = None self.fit_cut = 1 self.fit_start = 0 self.acf = 
None # autocorrelation function self.final_msd = None self.steps = [] # Initialize multi-threading self.pbar = None # For fractional levy motion self.m = 256 self.Mlowerbound = 6000 def generate_trajectories(self, max_hop=None, fixed_time=False, noise=0, ll=0.1, limit=None, distributions=None, discrete=False, fixed_displacement=False, m=256, Mlowerbound=6000, nt=1): """ Create trajectories by randomly drawing from dwell and hop distributions :param max_hop: maximum distance a solute can hop :param fixed_time: Propagate each trajectory until a certain wall time is reached :param noise: add gaussian noise to final trajectories :param ll: lower limit of power law distribution :param limit: upper limit on dwell time length (As implemented, this is not mathematically sound, so it's only \ for demonstration purposes) :param distributions: distributions of alpha and sigma values for dwell and hop length distributions \ respectively. Passed as 2-tuple of arrays where each array contains a possible values of each parameter. :param discrete: pull from discrete dwell time probability distributions :param fixed_displacement: generate trajectories that stop once they reach a certain absolute displacement :param m: mesh size. Choose a value that is a power of 2 :param Mlowerbound: M is the kernel function cut-off. This is the lowest possible value of M that will be \ chosen. M will be chosen such that M + length is a power of 2. Higher values of M will lead to more accurate \ calculation of the correlation structure at large time lags. 
:param nt: number of threads to use for trajectory generation :type max_hop: float or NoneType :type fixed_time: bool :type noise: float :type ll: list of floats :type limit: float :type distributions: tuple :type discrete: bool :type fixed_displacement: bool :type m: int :type Mlowerbound: int :type nt: int """ self.m = m self.Mlowerbound = Mlowerbound self.time_uniform = np.linspace(0, self.length, self.length * self.padding) if type(ll) is not list: ll = [ll] if nt > 1: pool = Pool(processes=nt) # set up multiprocessing pool trajectories_per_thread = self.ntraj // nt arguments = [] for n in range(nt): if n == (nt - 1): # give the last worker a little extra work ntraj = trajectories_per_thread + (self.ntraj - nt * trajectories_per_thread) arguments.append((ntraj, ll, distributions, discrete, noise, max_hop)) else: arguments.append((trajectories_per_thread, ll, distributions, discrete, noise, max_hop)) result = pool.starmap(self._fixed_time_trajectories, arguments) # do the calculations # unpack result list for i, r in enumerate(result): if i < (nt - 1): self.z_interpolated[i * trajectories_per_thread:(i + 1) * trajectories_per_thread, :] = r[0] else: self.z_interpolated[i * trajectories_per_thread:(i + 1) * trajectories_per_thread, :] = r[0] self.steps += r[1] pool.close() pool.join() else: if fixed_time: self.z_interpolated, self.steps = self._fixed_time_trajectories(self.ntraj, ll, distributions, discrete, noise, max_hop) else: self.fixed_steps_trajectories(noise=noise, nt=nt, ll=ll, limit=limit) self.time_uniform *= self.dt def _fixed_time_trajectories(self, n, ll, distributions, discrete, noise, max_hop): """ Create trajectories of fixed length where dwell times and hop lengths are drawn from the appropriate distributions. 
:param n: number of trajectories to generate :param max_hop: maximum distance a solute can hop :param ll: lower limit of power law distribution :param distributions: distributions of alpha and sigma values for dwell and hop length distributions \ respectively. Passed as 2-tuple of arrays where each array contains possible values of each parameter. :param discrete: pull from discrete dwell time probability distributions :param noise: add Gaussian noise to final trajectories :type n: int :type max_hop: float or NoneType :type ll: list of floats :type distributions: tuple :type discrete: bool :type noise: float """ z_interpolated = np.zeros([n, self.length*self.padding]) # separate from time_uniform to save memory steps = [] if type(ll) is not list: ll = [ll] # generate ntraj realizations for t in tqdm.tqdm(range(n)): # Get H, alpha and sigma parameter values if distributions is not None: # if self.dwell_distribution == 'exponential': # self.lamb =
""" Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import re import unittest2 as unittest import tinctest from tinctest.runner import TINCTextTestResult from tinctest.lib import Gpdiff from mpp.models import SQLTestCase from mpp.lib.PSQL import PSQL @tinctest.skipLoading("Test model. No tests loaded.") class SQLPerformanceTestCase(SQLTestCase): """ @metadata: repetitions: number of times the sql should be executed (default: 3) @metadata: threshold: if the current performance of the query is worse than baseline by this much percentage, the test is marked failed (default: 5) @metadata: timeout: number of seconds to wait for the query to complete. When timeout is reached, the query is terminated. Value of 0 means no timeout. 
    (default: 0)
    @metadata: drop_caches: whether to drop system cache and restart cluster before running each query (default: True)
    @metadata: explain: whether to gather explain plan details for each query (default: True)
    @metadata: plandiff: tdb
    """

    def __init__(self, methodName, baseline_result = None, sql_file=None, db_name = None):
        # Defaults below are overwritten by _infer_metadata, which the super
        # constructor is expected to invoke — TODO confirm ordering in SQLTestCase.
        self.repetitions = None
        self.threshold = None
        self.timeout = None
        self.plandiff = True
        self.doexplain = True
        self.drop_caches = True
        # best runtime observed across repetitions; -1.0 means "not run yet"
        self._runtime = -1.0
        self._result_string = None
        # normalized explain output (GUCs + plan body) of the last run
        self._plan_body = ''
        # when True, run_test() skips execution and reuses the baseline result
        self._avoid_execution = False;
        # per-phase optimizer timings parsed from the explain log
        self._optimization_time = {};
        # raw "explain analyze" output
        self._ea_output = ''
        super(SQLPerformanceTestCase, self).__init__(methodName, baseline_result, sql_file, db_name)
        self.gucs.add('statement_timeout='+str(self.timeout))

    def _infer_metadata(self):
        """Populate instance fields from the @metadata annotations in the docstring."""
        super(SQLPerformanceTestCase, self)._infer_metadata()
        self.repetitions = int(self._metadata.get('repetitions', '3'))
        self.threshold = int(self._metadata.get('threshold', '5'))
        self.timeout = int(self._metadata.get('timeout', '0')) # 0 means unbounded by default.
        if self._metadata.get('drop_caches', 'True') == 'False':
            self.drop_caches = False
        if self._metadata.get('explain', 'True') == 'False':
            self.doexplain = False
        if self._metadata.get('plandiff', 'True') == 'False':
            self.plandiff = False

    def setUp(self):
        # Setup the database by calling out to the super class
        tinctest.logger.trace_in()
        super(SQLPerformanceTestCase, self).setUp()
        # Collect explain output and then compare with that of the last run
        if self.doexplain:
            self._compare_previous_plan()
        tinctest.logger.trace_out()

    def _compare_previous_plan(self):
        """
        Get plan first and then compare with that of the previous run. If nothing
        change in the plan structure, there is no need to re-execute that query.
        The result will be copied from the previous run.
""" #execute the explain sql to fetch plan explain_sql_file = os.path.join(self.get_out_dir(), os.path.basename(self.sql_file).replace('.sql','_explain.sql')) with open(explain_sql_file, 'w') as o: with open(self.sql_file, 'r') as f: explain_write = False for line in f: if not line.startswith('--') and not explain_write: #keep all the GUCs o.write('-- start_ignore\n') for guc_string in self.gucs: o.write("SET %s;" %guc_string) o.write(line) for orca_guc_string in self.orcagucs: o.write("%s;\n"%orca_guc_string) # Add gucs to print optimization time to log o.write("SET optimizer_print_optimization_stats=on;\n") o.write("SET client_min_messages='log';\n") o.write("SELECT gp_opt_version();\n") o.write("SELECT current_timestamp;\n") o.write('-- end_ignore\n') o.write('explain %s' %line) explain_write = True else: o.write(line); explain_out_file = os.path.join(self.get_out_dir(), os.path.basename(explain_sql_file).replace('.sql','.out')) tinctest.logger.info("Gathering explain from sql : " + explain_sql_file) PSQL.run_sql_file(explain_sql_file, dbname = self.db_name, out_file = explain_out_file) # rewrite plan to keep plan body self._rewrite_plan_file(explain_out_file) # retrieve previous plan and store it into local file if self.baseline_result and self.plandiff: if self.baseline_result.result_detail: if 'plan_body' in self.baseline_result.result_detail.keys(): previous_explain_output = self.baseline_result.result_detail['plan_body'] previous_explain_output_file = explain_out_file.replace('.out','_previous.out') with open(previous_explain_output_file, 'w') as o: o.write(previous_explain_output) # call GPDiff to compare two plans if Gpdiff.are_files_equal(previous_explain_output_file, explain_out_file): # two plans are the same, avoid execution self._avoid_execution = True self._runtime = self.baseline_result.value # copy the runtime from previous result self._result_string = self.baseline_result.result_string # comment it out as we are experiencing some problems during 
parse. if 'explain_analyze' in self.baseline_result.result_detail.keys(): tmp_ea = self.baseline_result.result_detail['explain_analyze'] self._ea_output = tmp_ea.replace('\\','') if len(self._ea_output) == 0: # if there is no previous explain analyze output, generate it self._generate_explain_analyze_output() else: self._generate_explain_analyze_output() def _generate_explain_analyze_output(self): """ execute explain analyze output for a given query """ ea_sql_file = os.path.join(self.get_out_dir(), os.path.basename(self.sql_file).replace('.sql','_explain_analyze.sql')) with open(ea_sql_file, 'w') as o: with open(self.sql_file, 'r') as f: explain_write = False for line in f: if not line.startswith('--') and not explain_write: #keep all the GUCs o.write('-- start_ignore\n') for guc_string in self.gucs: o.write("SET %s;" %guc_string) o.write(line) for orca_guc_string in self.orcagucs: o.write("%s;\n"%orca_guc_string) # Add gucs to print optimization time to log o.write("SET optimizer_print_optimization_stats=on;\n") o.write("SET client_min_messages='log';\n") o.write('-- end_ignore\n') o.write('explain analyze %s' %line) explain_write = True else: o.write(line); ea_out_file = ea_sql_file.replace('.sql','.out') PSQL.run_sql_file(ea_sql_file, dbname = self.db_name, out_file = ea_out_file) with open(ea_out_file, 'r') as f: self._ea_output = f.read() def _rewrite_plan_file(self, explain_out_file): """ rewrite explain output to keep only GUC info and plan body """ # initialize time variable dxl_query_serialization_time = 0.0 dxl_expr_translation_time = 0.0 group_merge_time = 0.0 total_opt_time = 0.0 stats_derivation_time = 0.0 expr_dxl_translation_time = 0.0 dxl_plan_serialization_time = 0.0 guc_plan_content = '' with open(explain_out_file, 'r') as f: able_to_write = True fall_back_check = False for line in f: # ignore the part that from '-- end_ignore' to 'QUERY PLAN' if line.startswith('-- end_ignore'): guc_plan_content += line able_to_write = False elif 
line.find('QUERY PLAN') != -1: guc_plan_content += '-- force_explain\n' able_to_write = True if able_to_write: guc_plan_content += line # collect total optimization time and statistics derivation time if line.find("Statistics Derivation Time")!=-1: try: stats_derivation_time = float(line[line.rindex(':')+1:line.rindex('ms')].strip()) except ValueError: stats_derivation_time = -1.0 elif line.find("Total Optimization Time")!=-1: try: total_opt_time = float(line[line.rindex(':')+1:line.rindex('ms')].strip()) except ValueError: total_opt_time = -1.0 elif line.find("DXL Query Serialization Time")!=-1: try: dxl_query_serialization_time = float(line[line.rindex(':')+1:line.rindex('ms')].strip()) except ValueError: dxl_query_serialization_time = -1.0 elif line.find("DXL To Expr Translation Time")!=-1: try: dxl_expr_translation_time = float(line[line.rindex(':')+1:line.rindex('ms')].strip()) except ValueError: dxl_expr_translation_time = -1.0 elif line.find("Group Merge Time")!=-1: try: group_merge_time = float(line[line.rindex(':')+1:line.rindex('ms')].strip()) except ValueError: group_merge_time = -1.0 elif line.find("Expr To DXL Translation Time")!=-1: try: expr_dxl_translation_time = float(line[line.rindex(':')+1:line.rindex('ms')].strip()) except ValueError: expr_dxl_translation_time = -1.0 elif line.find("DXL Plan Serialization Time")!=-1: try: dxl_plan_serialization_time = float(line[line.rindex(':')+1:line.rindex('ms')].strip()) except ValueError: dxl_plan_serialization_time = -1.0 elif line.find('Planner produced plan :0')!=-1 and fall_back_check == False: fall_back_stats_path = os.path.join(self.get_out_dir(), 'fall_back_stats.txt') existing = os.path.exists(fall_back_stats_path) mode = 'a' if existing else 'w' with open(fall_back_stats_path, mode) as f: f.write('%s Expected fall back\n'%self.sql_file) fall_back_check = True elif line.find('Planner produced plan :1')!=-1 and fall_back_check == False: fall_back_stats_path = os.path.join(self.get_out_dir(), 
'fall_back_stats.txt') existing = os.path.exists(fall_back_stats_path) mode = 'a' if existing else 'w' with open(fall_back_stats_path, mode) as f: f.write('%s Unexpected fall back\n'%self.sql_file) fall_back_check = True self._optimization_time['total_opt_time'] = total_opt_time self._optimization_time['statistics_time'] = stats_derivation_time self._optimization_time['dxl_query_serialization_time'] = dxl_query_serialization_time self._optimization_time['dxl_expr_translation_time'] = dxl_expr_translation_time self._optimization_time['group_merge_time'] = group_merge_time self._optimization_time['expr_dxl_translation_time'] = expr_dxl_translation_time self._optimization_time['dxl_plan_serialization_time'] = dxl_plan_serialization_time self._plan_body = guc_plan_content with open(explain_out_file, 'w') as o: o.write(guc_plan_content) def run_test(self): """ The method that subclasses should override to execute a sql test case differently. This encapsulates the execution mechanism of SQLTestCase. Given a base sql file and an ans file, runs all the sql files for the test case. Note that this also runs the other part sqls that make up the test case. For eg: if the base sql is query1.sql, the part sqls are of the form query1_part*.sql in the same location as the base sql. 
""" tinctest.logger.trace_in() sql_file = self.sql_file ans_file = self.ans_file # if the plan is the same as previous one, skip this run if self._avoid_execution: tinctest.logger.info("Skipping test execution as there is no plan change w.r.t previous run.") str_runtime_list = [] for i in range(self.repetitions): str_runtime_list.append(str(self._runtime)) # dump statistics to a runtime_stats.csv file output_file_path = os.path.join(self.get_out_dir(), 'runtime_stats.csv') existing = os.path.exists(output_file_path) mode = 'a' if existing else 'w' with open(output_file_path, mode) as f: f.write("%s,%s\n" % (os.path.basename(sql_file), ",".join(str_runtime_list))) if self._result_string == 'FAIL' or self._result_string == 'ERROR': tinctest.logger.trace_out("False") return False else: tinctest.logger.trace_out("True") return True guc_sql_file = self._add_gucs_to_sql_file(sql_file) runtime_list = [] for i in range(self.repetitions): # refresh the caches after each iteration if self.drop_caches: self._restart_cluster(refresh_cache=True) runtime_list.append(self._run_and_measure_sql_file(guc_sql_file, i, ans_file)) # dump statistics to a runtime_stats.csv file str_runtime_list = [str(x) for x in runtime_list] output_file_path = os.path.join(self.get_out_dir(), 'runtime_stats.csv') existing = os.path.exists(output_file_path) mode = 'a' if existing else 'w' with open(output_file_path, mode) as f: f.write("%s,%s\n" % (os.path.basename(sql_file), ",".join(str_runtime_list))) self._runtime = min(runtime_list) tinctest.logger.trace_out("True") return True def _run_and_measure_sql_file(self, sql_file, iteration, ans_file = None): """ Given a sql file and an ans file, this adds
block['hash'][2:],
        # NOTE(review): fragment — the opening of this dict literal
        # (create_atomic_swap_dict = { 'block_hash': ...) sits on an earlier
        # line outside this chunk; keys mirror the other deserializers.
        'block_number': block['index'],
        'block_size': block['size'],
        'block_time': block['time'],
        # Block timestamp rendered as a UTC calendar date string.
        'block_date': datetime.datetime.utcfromtimestamp(block['time']).strftime('%Y-%m-%d'),
        'contract_hash': txn['contract_hash'],
        'contract_hash_version': txn['contract_hash_version'],
        # Transaction ids arrive '0x'-prefixed; [2:] strips the prefix.
        'transaction_hash': txn['txid'][2:],
        'transaction_type': txn['type'],
        'switcheo_transaction_type': 'createAtomicSwap',
        'burn_token_original': burn_token_original,
        'burn_token': burn_token,
        'fee_amount_original': fee_amount_original,
        'fee_amount': fee_amount,
        'fee_amount_fixed8': fee_amount_fixed8,
        'fee_asset_original': fee_asset_original,
        'fee_asset': fee_asset,
        'fee_asset_name': fee_asset_name,
        'expiry_time_original': expiry_time_original,
        'expiry_time': expiry_time,
        'hash_of_secret_original': hash_of_secret_original,
        'hash_of_secret': hash_of_secret,
        'amount_original': amount_original,
        'amount': amount,
        'amount_fixed8': amount_fixed8,
        'asset_original': asset_original,
        'asset': asset,
        'asset_name': asset_name,
        'taker_address_original': taker_address_original,
        'taker_address': taker_address,
        'maker_address_original': maker_address_original,
        'maker_address': maker_address
    }
    return create_atomic_swap_dict

    def deserialize_deploy(self, block, txn, script):
        # Contract-deployment transactions carry no Switcheo payload to extract.
        pass

    def deserialize_deposit(self, block, txn, script):
        """Decode a Switcheo 'deposit' app call into a flat dict.

        block/txn are raw NEO RPC structures; script appears to be the list of
        decoded VM instructions (e.g. "PUSHBYTES20 0x..", "APPCALL 0x..") —
        TODO confirm against the caller.
        """
        # The APPCALL operand (byte-reversed) identifies which contract
        # version was invoked.
        contract_hash = None
        for s in script:
            if str(s).split()[0] == "APPCALL":
                contract_hash = reverse_hex(str(s).split()[1][2:])
        # script[1] = deposited asset id; script[2] = depositor script hash,
        # right-justified to 40 hex chars (20 bytes).
        script[1] = self.zero_pad_if_odd_length_string(str(script[1]).split()[1][2:])
        script[2] = self.zero_pad_if_odd_length_string(str(script[2]).split()[1][2:]).rjust(40, '0')
        if len(str(script[0]).split()) == 1 and str(script[0]).split()[0].startswith('PUSH'):
            # Redeemable Hash Puppies Deposit
            # Amount is encoded directly in the opcode mnemonic (PUSHn).
            deposit_amount_original = str(script[0]).split()[0]
            deposit_amount = int(deposit_amount_original[4:])
            deposit_amount_fixed8 = SwitcheoFixed8(deposit_amount).ToString()
        elif contract_hash == '0ec5712e0f7c63e4b0fea31029a28cea5e9d551f':
            # v1 contract: amount is a little-endian hex operand.
            deposit_amount_original = str(script[0]).split()[1][2:]
            deposit_amount = int(reverse_hex(deposit_amount_original), 16)
            deposit_amount_fixed8 = SwitcheoFixed8(deposit_amount).ToString()
        else:
            # Later contracts: pad the operand to 8 bytes; the pad direction
            # depends on the byte count declared by the PUSHBYTESn opcode.
            hex_string = self.zero_pad_if_odd_length_string(str(script[0]).split()[1][2:])
            hex_bytes = int(str(script[0]).split()[0][-1])
            if hex_bytes < 8:
                deposit_amount_original = hex_string.ljust(16, '0')
            elif hex_bytes == 8:
                deposit_amount_original = hex_string.rjust(16, '0')
            else:
                raise ValueError('Deposit Hex Byte amount greater than 8.')
            deposit_amount = int(reverse_hex(deposit_amount_original), 16)
            deposit_amount_fixed8 = SwitcheoFixed8(deposit_amount).ToString()
        deposit_dict = {
            'block_hash': block['hash'][2:],
            'block_number': block['index'],
            'block_size': block['size'],
            'block_time': block['time'],
            'block_date': datetime.datetime.utcfromtimestamp(block['time']).strftime('%Y-%m-%d'),
            'contract_hash': txn['contract_hash'],
            'contract_hash_version': txn['contract_hash_version'],
            'transaction_hash': txn['txid'][2:],
            'transaction_type': txn['type'],
            'switcheo_transaction_type': 'deposit',
            'deposit_amount_original': deposit_amount_original,
            'deposit_amount': deposit_amount,
            'deposit_amount_fixed8': deposit_amount_fixed8,
            'deposit_asset_original': script[1],
            'deposit_asset': reverse_hex(script[1]),
            'deposit_asset_name': self.neo_token_dict[reverse_hex(script[1])],
            'deposit_address_original': script[2],
            'deposit_address': neo_get_address_from_scripthash(scripthash=reverse_hex(script[2])),
            'deposits': []
        }
        # Also mirror the transaction's UTXO outputs so native-asset
        # transfers attached to the deposit are captured.
        for transfer in txn['vout']:
            out_dict = {
                'deposit_address': transfer['address'],
                'deposit_asset': transfer['asset'],
                'deposit_asset_name': self.neo_token_dict[transfer['asset'][2:]],
                'deposit_amount': transfer['value']
            }
            deposit_dict['deposits'].append(out_dict)
        return deposit_dict

    def deserialize_execute_atomic_swap(self, block, txn, script):
        # NOTE(review): fragment — this method continues on the next chunk line.
        preimage_original = str(script[0])
        # PUSHBYTESn operand length (bytes) -> number of hex characters.
        pad_length = int(preimage_original.split()[0][9:]) * 2
        preimage_original =
self.zero_pad_if_odd_length_string(preimage_original.split()[1][2:], output_size=pad_length) preimage = reverse_hex(preimage_original) hash_of_secret_original = str(script[1]) # hashOfSecret pad_length = int(hash_of_secret_original.split()[0][9:]) * 2 hash_of_secret_original = self.zero_pad_if_odd_length_string(hash_of_secret_original.split()[1][2:], output_size=pad_length) hash_of_secret = reverse_hex(hash_of_secret_original) execute_atomic_swap_dict = { 'block_hash': block['hash'][2:], 'block_number': block['index'], 'block_size': block['size'], 'block_time': block['time'], 'block_date': datetime.datetime.utcfromtimestamp(block['time']).strftime('%Y-%m-%d'), 'contract_hash': txn['contract_hash'], 'contract_hash_version': txn['contract_hash_version'], 'transaction_hash': txn['txid'][2:], 'transaction_type': txn['type'], 'switcheo_transaction_type': 'executeAtomicSwap', 'hash_of_secret_original': hash_of_secret_original, 'hash_of_secret': hash_of_secret, 'preimage_original': preimage_original, 'preimage': preimage } return execute_atomic_swap_dict def deserialize_fill_offer(self, block, txn, script): contract_hash = None for s in script: if str(s).split()[0] == "APPCALL": # Needed for v1 Contract with 5 variables in block 2087974; tx: C9741EFBDDF1F43D6B8778B3887EE035D44BE49A5EDD093773BEF2FA231DF31E contract_hash = reverse_hex(str(s).split()[1][2:]) if contract_hash in ['0ec5712e0f7c63e4b0fea31029a28cea5e9d551f', '01bafeeafe62e651efc3a530fde170cf2f7b09bd']: # if str(script[0]).startswith('PUSH'): use_native_token_original = str(script[0]) use_native_token = True if use_native_token_original[4:] == 1 else False if len(str(script[1]).split()) == 1: amount_to_fill_original = str(script[1]) amount_to_fill = int(str(script[1])[4:]) amount_to_fill_fixed8 = SwitcheoFixed8(amount_to_fill).ToString() else: amount_to_fill_original = self.zero_pad_if_odd_length_string(str(script[1]).split()[1][2:]).rjust(8, '0') amount_to_fill = int(reverse_hex(amount_to_fill_original), 16) 
amount_to_fill_fixed8 = SwitcheoFixed8(amount_to_fill).ToString() offer_hash_original = self.zero_pad_if_odd_length_string(str(script[2]).split()[1][2:]).rjust(64, '0') offer_hash = reverse_hex(offer_hash_original) trading_pair_original = self.zero_pad_if_odd_length_string(str(script[3]).split()[1][2:]).rjust(104, '0') trading_pair = reverse_hex(trading_pair_original) taker_address_original = self.zero_pad_if_odd_length_string(str(script[4]).split()[1][2:]).rjust(40, '0') taker_address = neo_get_address_from_scripthash(scripthash=reverse_hex(taker_address_original)) fee_amount_original = None fee_amount = None fee_amount_fixed8 = None fee_asset_original = None fee_asset = None # reverse_hex(fee_asset_original) fee_asset_name = None # self.neo_token_dict[reverse_hex(fee_asset_original)] taker_amount_original = None taker_amount = None taker_amount_fixed8 = None fill_offer_dict = { 'amount_to_fill_original': amount_to_fill_original, 'amount_to_fill': amount_to_fill, 'amount_to_fill_fixed8': amount_to_fill_fixed8, 'block_hash': block['hash'][2:], 'block_number': block['index'], 'block_size': block['size'], 'block_time': block['time'], 'block_date': datetime.datetime.utcfromtimestamp(block['time']).strftime('%Y-%m-%d'), 'contract_hash': txn['contract_hash'], 'contract_hash_version': txn['contract_hash_version'], 'transaction_hash': txn['txid'][2:], 'transaction_type': txn['type'], 'switcheo_transaction_type': 'fillOffer', 'fee_amount_original': fee_amount_original, 'fee_amount': fee_amount, 'fee_amount_fixed8': fee_amount_fixed8, 'fee_asset_original': fee_asset_original, 'fee_asset': fee_asset, 'fee_asset_name': fee_asset_name, 'taker_amount_original': taker_amount_original, 'taker_amount': taker_amount, 'taker_amount_fixed8': taker_amount_fixed8, 'trading_pair_original': trading_pair_original, 'trading_pair': trading_pair, 'offer_hash_original': offer_hash_original, 'offer_hash': offer_hash, 'taker_address_original': taker_address_original, 'taker_address': 
taker_address, 'use_native_token_original': use_native_token_original, 'use_native_token': use_native_token } elif contract_hash == '91b83e96f2a7c4fdf0c1688441ec61986c7cae26': script[2] = self.zero_pad_if_odd_length_string(str(script[2]).split()[1][2:]) script[4] = self.zero_pad_if_odd_length_string(str(script[4]).split()[1][2:]).rjust(64, '0') script[5] = self.zero_pad_if_odd_length_string(str(script[5]).split()[1][2:]).rjust(40, '0') if len(str(script[1]).split()) == 1 and len(str(script[3]).split()) == 1: # fillOffer with 1 taker and 0 fee: https://neoscan.io/transaction/B12E71381630318405480D69B868477B136B003F4050F6D1370B1B220DEBAF8D fee_amount_original = str(script[1]).split()[0] fee_amount = int(fee_amount_original[4:]) fee_amount_fixed8 = SwitcheoFixed8(fee_amount).ToString() taker_amount_original = str(script[3]).split()[0] taker_amount = int(taker_amount_original[4:]) taker_amount_fixed8 = SwitcheoFixed8(taker_amount).ToString() elif len(str(script[1]).split()) == 1 and len(str(script[3]).split()) == 2: # https://neoscan.io/transaction/2E7C792B63D281F79AE628EEEE80E180D939F98FBC92BE18F44CEF1A9299D6CC fee_amount_original = str(script[1]).split()[0] fee_amount = int(fee_amount_original[4:]) fee_amount_fixed8 = SwitcheoFixed8(fee_amount).ToString() taker_amount_original = self.zero_pad_if_odd_length_string(str(script[3]).split()[1][2:]).rjust(16, '0') taker_amount = int(reverse_hex(taker_amount_original), 16) taker_amount_fixed8 = SwitcheoFixed8(taker_amount).ToString() else: hex_string = self.zero_pad_if_odd_length_string(str(script[1]).split()[1][2:]) hex_bytes = int(str(script[1]).split()[0][-1]) if hex_bytes < 8: fee_amount_original = hex_string.ljust(16, '0') elif hex_bytes == 8: fee_amount_original = hex_string.rjust(16, '0') else: raise ValueError('Fee Hex Byte amount greater than 8.') fee_amount = int(reverse_hex(fee_amount_original), 16) fee_amount_fixed8 = SwitcheoFixed8(fee_amount).ToString() if len(str(script[3]).split()) == 1: 
taker_amount_original = str(script[3]).split()[0] taker_amount = int(taker_amount_original[4:]) taker_amount_fixed8 = SwitcheoFixed8(taker_amount).ToString() else: hex_string = self.zero_pad_if_odd_length_string(str(script[3]).split()[1][2:]) hex_bytes = int(str(script[3]).split()[0][-1]) if hex_bytes < 8: taker_amount_original = hex_string.ljust(16, '0') elif hex_bytes == 8: taker_amount_original = hex_string.rjust(16, '0') else: raise ValueError('Taker Hex Byte amount greater than 8.') taker_amount = int(reverse_hex(taker_amount_original), 16) taker_amount_fixed8 = SwitcheoFixed8(taker_amount).ToString() fill_offer_dict = { 'block_hash': block['hash'][2:], 'block_number': block['index'], 'block_size': block['size'], 'block_time': block['time'], 'block_date': datetime.datetime.utcfromtimestamp(block['time']).strftime('%Y-%m-%d'), 'contract_hash': txn['contract_hash'], 'contract_hash_version': txn['contract_hash_version'], 'transaction_hash': txn['txid'][2:], 'transaction_type': txn['type'], 'switcheo_transaction_type': 'fillOffer', 'fee_amount_original': fee_amount_original, 'fee_amount': fee_amount, 'fee_amount_fixed8': fee_amount_fixed8, 'fee_asset_original': script[2], 'fee_asset': reverse_hex(script[2]), 'fee_asset_name': self.neo_token_dict[reverse_hex(script[2])], 'taker_amount_original': taker_amount_original, 'taker_amount': taker_amount, 'taker_amount_fixed8': taker_amount_fixed8, 'offer_hash_original': self.zero_pad_if_odd_length_string(script[4]), 'offer_hash': reverse_hex(script[4]), 'taker_address_original': script[5], 'taker_address': neo_get_address_from_scripthash(scripthash=reverse_hex(script[5])) } elif contract_hash in ['a32bcf5d7082f740a4007b16e812cf66a457c3d4', 'b9a70a85136ed73f1f94e83edfee68c00daf412f']: maker_fee_burn_original = None maker_fee_burn = None maker_fee_burn_amount_original = None maker_fee_hex_string = None maker_fee_burn_amount = None maker_fee_burn_amount_fixed8 = None taker_fee_burn_original = None taker_fee_burn = None 
taker_fee_burn_amount_original = None taker_fee_hex_string = None taker_fee_burn_amount = None taker_fee_burn_amount_fixed8 = None taker_fee_asset_original = None taker_fee_asset = None taker_fee_asset_name = None taker_amount_original = None maker_fee_burn_original = str(script[0]) # bool burnMakerFee if maker_fee_burn_original == 'PUSH1': maker_fee_burn = True elif maker_fee_burn_original == 'PUSH0': maker_fee_burn = False maker_fee_burn_amount_original = str(script[1]) # BigInteger makerFeeAmount if maker_fee_burn_amount_original.startswith('PUSH') and not maker_fee_burn_amount_original.startswith('PUSHBYTES'): maker_fee_burn_amount_original = maker_fee_burn_amount_original.split()[0] maker_fee_burn_amount = int(maker_fee_burn_amount_original[4:]) maker_fee_burn_amount_fixed8 = SwitcheoFixed8(maker_fee_burn_amount).ToString() else: maker_fee_burn_amount_original = self.zero_pad_if_odd_length_string(maker_fee_burn_amount_original.split()[1][2:]) maker_fee_burn_amount = int(reverse_hex(maker_fee_burn_amount_original), 16) maker_fee_burn_amount_fixed8 = SwitcheoFixed8(maker_fee_burn_amount).ToString() taker_fee_burn_original = str(script[2]) # bool burnTakerFee if taker_fee_burn_original == 'PUSH1': taker_fee_burn = True elif taker_fee_burn_original == 'PUSH0': taker_fee_burn = False taker_fee_burn_amount_original = str(script[3]) # BigInteger takerFeeAmount if taker_fee_burn_amount_original.startswith('PUSH') and not taker_fee_burn_amount_original.startswith('PUSHBYTES'): taker_fee_burn_amount_original = taker_fee_burn_amount_original.split()[0] taker_fee_burn_amount = int(taker_fee_burn_amount_original[4:]) taker_fee_burn_amount_fixed8 = SwitcheoFixed8(taker_fee_burn_amount).ToString() else: taker_fee_burn_amount_original = self.zero_pad_if_odd_length_string(taker_fee_burn_amount_original.split()[1][2:]) taker_fee_burn_amount = int(reverse_hex(taker_fee_burn_amount_original), 16) taker_fee_burn_amount_fixed8 = SwitcheoFixed8(taker_fee_burn_amount).ToString() 
taker_fee_asset_original = str(script[4]) # byte[] takerFeeAssetID pad_length = int(taker_fee_asset_original.split()[0][9:]) * 2 taker_fee_asset_original = self.zero_pad_if_odd_length_string(taker_fee_asset_original.split()[1][2:], output_size=pad_length) taker_fee_asset = reverse_hex(taker_fee_asset_original) taker_fee_asset_name = self.neo_token_dict[taker_fee_asset] taker_amount_original = str(script[5]) # BigInteger amountToTake if taker_amount_original.startswith('PUSH') and not taker_amount_original.startswith('PUSHBYTES'): taker_amount_original = taker_amount_original.split()[0] taker_amount = int(taker_amount_original[4:]) taker_amount_fixed8 = SwitcheoFixed8(taker_amount).ToString() else: pad_length = int(taker_amount_original.split()[0][9:]) * 2 taker_amount_original = self.zero_pad_if_odd_length_string(taker_amount_original.split()[1][2:], output_size = pad_length) taker_amount = int(reverse_hex(taker_amount_original), 16) taker_amount_fixed8 = SwitcheoFixed8(taker_amount).ToString() offer_hash_original = self.zero_pad_if_odd_length_string(str(script[6]).split()[1][2:]) # byte[] offerHash offer_hash = reverse_hex(offer_hash_original) taker_address_original = str(script[7]) # byte[] fillerAddress pad_length = int(taker_address_original.split()[0][9:]) * 2 taker_address_original = self.zero_pad_if_odd_length_string(taker_address_original.split()[1][2:], output_size=pad_length) taker_address = neo_get_address_from_scripthash(scripthash=reverse_hex(taker_address_original)) fill_offer_dict = { 'block_hash': block['hash'][2:], 'block_number': block['index'], 'block_size': block['size'], 'block_time': block['time'], 'block_date': datetime.datetime.utcfromtimestamp(block['time']).strftime('%Y-%m-%d'), 'contract_hash': txn['contract_hash'], 'contract_hash_version': txn['contract_hash_version'], 'transaction_hash': txn['txid'][2:], 'transaction_type': txn['type'], 'switcheo_transaction_type': 'fillOffer', 'maker_fee_burn_original': maker_fee_burn_original, 
'maker_fee_burn': maker_fee_burn,
            # NOTE(review): fragment — the opening of this fill_offer_dict
            # literal (v3 contract branch) sits on an earlier line outside
            # this chunk.
            'maker_fee_burn_amount_original': maker_fee_burn_amount_original,
            'maker_fee_burn_amount': maker_fee_burn_amount,
            'maker_fee_burn_amount_fixed8': maker_fee_burn_amount_fixed8,
            'taker_fee_burn_original': taker_fee_burn_original,
            'taker_fee_burn': taker_fee_burn,
            'taker_fee_burn_amount_original': taker_fee_burn_amount_original,
            'taker_fee_burn_amount': taker_fee_burn_amount,
            'taker_fee_burn_amount_fixed8': taker_fee_burn_amount_fixed8,
            'taker_fee_asset_original': taker_fee_asset_original,
            'taker_fee_asset': taker_fee_asset,
            'taker_fee_asset_name': taker_fee_asset_name,
            'taker_amount_original': taker_amount_original,
            'taker_amount': taker_amount,
            'taker_amount_fixed8': taker_amount_fixed8,
            'offer_hash_original': offer_hash_original,
            'offer_hash': offer_hash,
            'taker_address_original': taker_address_original,
            'taker_address': taker_address
        }
        return fill_offer_dict

    def deserialize_freeze_trading(self, block, txn, script):
        """Decode a 'freezeTrading' admin call (toggles the contract trade state)."""
        freeze_trading_dict = {
            'block_hash': block['hash'][2:],
            'block_number': block['index'],
            'block_size': block['size'],
            'block_time': block['time'],
            'block_date': datetime.datetime.utcfromtimestamp(block['time']).strftime('%Y-%m-%d'),
            'contract_hash': txn['contract_hash'],
            'contract_hash_version': txn['contract_hash_version'],
            'transaction_hash': txn['txid'][2:],
            'transaction_type': txn['type'],
            'switcheo_transaction_type': 'freezeTrading',
            'trade_state_original': str(script[0]),
            # PUSH0 operand -> trading frozen ('Inactive'); anything else -> 'Active'.
            'trade_state': 'Inactive' if int(str(script[0])[4:]) == 0 else 'Active'
        }
        return freeze_trading_dict

    def deserizlize_generate_tokens(self, block, txn, script):
        # TODO(review): method name is misspelled ("deserizlize"); kept as-is
        # because callers/dispatch elsewhere may reference this exact name.
        pass

    def deserialize_initialize(self, block, txn, script):
        # Contract initialization calls carry no payload extracted here.
        pass

    def deserialize_make_offer(self, block, txn, script):
        # NOTE(review): fragment — this method continues on the next chunk line.
        contract_hash = None
        offer_hash = None
        # The APPCALL operand (byte-reversed) identifies the contract version.
        for s in script:
            if str(s).split()[0] == "APPCALL":
                # Needed for v1 Contract with 5 variables in block 2087928; tx: E839E289CC8D435EA49EC5FA66427085A10D3D2508FBC164C0BA2DB53BCB0198
                contract_hash = reverse_hex(str(s).split()[1][2:])
        if
contract_hash in ['0ec5712e0f7c63e4b0fea31029a28cea5e9d551f', '01bafeeafe62e651efc3a530fde170cf2f7b09bd']: want_amount_original = None want_amount = None want_amount_fixed8 = None want_asset_id_original = None want_asset_original = None want_asset = None want_asset_name = None offer_amount_original = None offer_amount = None offer_amount_fixed8 = None offer_asset_id_original = None offer_asset_original = None offer_asset = None offer_asset_name = None offer_hash_orignal = None maker_address_original = None maker_address = None switcheo_transaction_id_original = self.zero_pad_if_odd_length_string(str(script[4]).split()[1][2:]).rjust(64, '0') switcheo_transaction_id = 'v1.1' if contract_hash == '0ec5712e0f7c63e4b0fea31029a28cea5e9d551f' else 'v1.5' elif contract_hash == '91b83e96f2a7c4fdf0c1688441ec61986c7cae26': want_asset_original = self.zero_pad_if_odd_length_string(str(script[2]).split()[1][2:]) want_asset = reverse_hex(want_asset_original) want_asset_name = self.neo_token_dict[want_asset] offer_asset_original = self.zero_pad_if_odd_length_string(str(script[4]).split()[1][2:]) offer_asset = reverse_hex(offer_asset_original) offer_asset_name = self.neo_token_dict[offer_asset] maker_address_original = self.zero_pad_if_odd_length_string(str(script[5]).split()[1][2:]).rjust(40, '0') maker_address = neo_get_address_from_scripthash(scripthash=reverse_hex(maker_address_original)) # if script[2] == 'c4cbd934b09e3889e742a357d17b6c6f8e002823' and str(script[1]).split()[0].startswith('PUSH'): # Redeemable Hash Puppies Deposit if len(str(script[1]).split()) == 1 and len(str(script[3]).split()) == 1: want_amount_original = str(script[1]).split()[0] want_amount = int(want_amount_original[4:]) want_amount_fixed8 = SwitcheoFixed8(want_amount).ToString() offer_amount_original = str(script[3]).split()[0] offer_amount = int(offer_amount_original[4:]) offer_amount_fixed8 = SwitcheoFixed8(offer_amount).ToString() elif len(str(script[1]).split()) == 1 and len(str(script[3]).split()) 
== 2: want_amount_original = str(script[1]).split()[0] want_amount = int(want_amount_original[4:]) want_amount_fixed8 = SwitcheoFixed8(want_amount).ToString() offer_amount_original = self.zero_pad_if_odd_length_string(str(script[3]).split()[1][2:]).rjust(16, '0') offer_amount = int(reverse_hex(offer_amount_original), 16) offer_amount_fixed8 = SwitcheoFixed8(offer_amount).ToString() else: hex_string =
+ 5.6981417358594e-7*log( 236.453447120506 - 0.956203191157888*m.x1) + 5.0424073396149e-7*log(509.329744330999 - 0.868619394055885*m.x1) + 3.6945328156719e-7*log(719.780347180692 - 0.801072072839622*m.x1) + 1.1443315505226e-7*log(327.517012158458 - 0.926974955194555*m.x1) + 2.7110655469944e-7*log( 547.744519196025 - 0.856289587906023*m.x1) + 3.21107285072415e-6*log(495.371037182199 - 0.873099653379339*m.x1) + 9.9458634278688e-7*log(129.657584748611 - 0.990480947184819*m.x1) + 2.35630035822996e-6*log(261.674211430671 - 0.948108203331243*m.x1) + 6.2169063581427e-7*log( 373.959049138861 - 0.912068677201645*m.x1) + 1.47286345758327e-6*log(846.602134340364 - 0.760366691726443*m.x1) + 2.8743562167141e-7*log(244.433016330596 - 0.953642027078038*m.x1) + 4.241676143793e-8*log(448.653645634299 - 0.888094310607884*m.x1) + 3.107842418502e-8*log( 563.774842073764 - 0.851144412012199*m.x1) + 9.62612585838e-9*log(545.832820860844 - 0.856903176557072*m.x1) + 2.280550820748e-8*log(964.57779102352 - 0.722500610708998*m.x1) + 2.6062657347084e-7*log(386.804719169038 - 0.90794566406689*m.x1) + 8.072555581188e-8*log( 163.697577366872 - 0.979555293922485*m.x1) + 1.9124901050016e-7*log(461.599091032222 - 0.88393927305161*m.x1) + 5.114973546615e-8*log(534.130728924762 - 0.860659140912629*m.x1) + 1.2118019067993e-7*log(1218.02524091695 - 0.641152797599519*m.x1) + 2.417003911689e-8*log( 775.353772232083 - 0.783234936988245*m.x1) + 1.0677224872428e-7*log(556.971636607364 - 0.853328004260054*m.x1) + 3.307126226673e-8*log(600.07781259564 - 0.839492421579569*m.x1) + 7.834998369249e-8*log(553.074587453001 - 0.854578821446895*m.x1) + 2.098900852875e-8*log( 535.421327803972 - 0.860244903576765*m.x1) + 4.972558576005e-8*log(355.163421699796 - 0.918101419645222*m.x1) + 9.94456200525e-9*log(695.545998372881 - 0.808850455610791*m.x1) + 3.0142458643662e-7*log(692.989141812062 - 0.809671117605002*m.x1) + 7.1411281663263e-7*log( 1347.27032802532 - 0.599669621517727*m.x1) + 
1.4069379756063e-7*log(391.919406907936 - 0.906304027260237*m.x1) + 7.735274184483e-8*log(1939.03334836773 - 0.409734281453512*m.x1) + 2.2272443732888e-6*log(100 + 0.77*m.x218*(3115.6025 + m.x1)/(0.000274060001623156 + m.x218) - m.x1) + 1.02508903122814e-5*log(100 + 0.77*m.x219*(3115.6025 + m.x1)/(0.00173416442478794 + m.x219) - m.x1) + 7.5107480204026e-6*log(100 + 0.77*m.x220*(3115.6025 + m.x1)/( 0.000335445948448057 + m.x220) - m.x1) + 2.3263525400872e-6*log(100 + 0.77*m.x221*(3115.6025 + m.x1)/(0.00279966370191667 + m.x221) - m.x1) + 5.5114222461836e-6*log(100 + 0.77*m.x222*( 3115.6025 + m.x1)/(0.000437411653915557 + m.x222) - m.x1) + 4.8771752747006e-6*log(100 + 0.77* m.x223*(3115.6025 + m.x1)/(0.000128228779107299 + m.x223) - m.x1) + 3.5734685610586e-6*log(100 + 0.77*m.x224*(3115.6025 + m.x1)/(7.57303265558781e-5 + m.x224) - m.x1) + 1.1068335357244e-6*log( 100 + 0.77*m.x225*(3115.6025 + m.x1)/(0.000251779290257831 + m.x225) - m.x1) + 2.6222280278736e-6 *log(100 + 0.77*m.x226*(3115.6025 + m.x1)/(0.00011496393522606 + m.x226) - m.x1) + 3.1058508482201e-5*log(100 + 0.77*m.x227*(3115.6025 + m.x1)/(0.000133687293666454 + m.x227) - m.x1) + 9.6199525204672e-6*log(100 + 0.77*m.x228*(3115.6025 + m.x1)/(0.00210750816509285 + m.x228 ) - m.x1) + 2.27908795797624e-5*log(100 + 0.77*m.x229*(3115.6025 + m.x1)/(0.000365061340932758 + m.x229) - m.x1) + 6.0131877361138e-6*log(100 + 0.77*m.x230*(3115.6025 + m.x1)/( 0.000204625222058021 + m.x230) - m.x1) + 1.42459994889738e-5*log(100 + 0.77*m.x231*(3115.6025 + m.x1)/(5.83853277549572e-5 + m.x231) - m.x1) + 2.7801679092254e-6*log(100 + 0.77*m.x232*( 3115.6025 + m.x1)/(0.000411788305223751 + m.x232) - m.x1) + 4.102682829542e-7*log(100 + 0.77* m.x233*(3115.6025 + m.x1)/(0.000155135301356022 + m.x233) - m.x1) + 3.006003121188e-7*log(100 + 0.77*m.x234*(3115.6025 + m.x1)/(0.000110078398992706 + m.x234) - m.x1) + 9.31069226772e-8*log(100 + 0.77*m.x235*(3115.6025 + m.x1)/(0.000115570007588387 + m.x235) - m.x1) + 2.205820618312e-7* log(100 
+ 0.77*m.x236*(3115.6025 + m.x1)/(4.68187059871874e-5 + m.x236) - m.x1) + 2.5208623469896e-6*log(100 + 0.77*m.x237*(3115.6025 + m.x1)/(0.000194278750885906 + m.x237) - m.x1) + 7.808030139672e-7*log(100 + 0.77*m.x238*(3115.6025 + m.x1)/(0.000967158090120206 + m.x238 ) - m.x1) + 1.8498206957504e-6*log(100 + 0.77*m.x239*(3115.6025 + m.x1)/(0.000148636954481809 + m.x239) - m.x1) + 4.94736359681e-7*log(100 + 0.77*m.x240*(3115.6025 + m.x1)/(0.000119396301924079 + m.x240) - m.x1) + 1.1720929904342e-6*log(100 + 0.77*m.x241*(3115.6025 + m.x1)/( 3.02251419621976e-5 + m.x241) - m.x1) + 2.337802347766e-7*log(100 + 0.77*m.x242*(3115.6025 + m.x1 )/(6.73278856644583e-5 + m.x242) - m.x1) + 1.0327348356232e-6*log(100 + 0.77*m.x243*(3115.6025 + m.x1)/(0.000112109935305285 + m.x243) - m.x1) + 3.198756700262e-7*log(100 + 0.77*m.x244*( 3115.6025 + m.x1)/(0.000100172250725714 + m.x244) - m.x1) + 7.578257318406e-7*log(100 + 0.77* m.x245*(3115.6025 + m.x1)/(0.0001133011344372 + m.x245) - m.x1) + 2.03012304525e-7*log(100 + 0.77 *m.x246*(3115.6025 + m.x1)/(0.000118964217618766 + m.x246) - m.x1) + 4.80961534947e-7*log(100 + 0.77*m.x247*(3115.6025 + m.x1)/(0.000221641328507187 + m.x247) - m.x1) + 9.6186937435e-8*log(100 + 0.77*m.x248*(3115.6025 + m.x1)/(7.98854666975635e-5 + m.x248) - m.x1) + 2.9154735846228e-6* log(100 + 0.77*m.x249*(3115.6025 + m.x1)/(8.03436631751567e-5 + m.x249) - m.x1) + 6.9071241929722e-6*log(100 + 0.77*m.x250*(3115.6025 + m.x1)/(2.43595847174932e-5 + m.x250) - m.x1 ) + 1.3608347452922e-6*log(100 + 0.77*m.x251*(3115.6025 + m.x1)/(0.000190412613378249 + m.x251) - m.x1) + 7.481800944402e-7*log(100 + 0.77*m.x252*(3115.6025 + m.x1)/(8.03263481518263e-6 + m.x252) - m.x1) + 3.48578463039596e-6*log(100 + 0.77*m.x253*(3115.6025 + m.x1)/( 0.000115610604617764 + m.x253) - m.x1) + 1.60433207630746e-5*log(100 + 0.77*m.x254*(3115.6025 + m.x1)/(0.000731547093588758 + m.x254) - m.x1) + 1.17548169955132e-5*log(100 + 0.77*m.x255*( 3115.6025 + m.x1)/(0.000141505906323334 + m.x255) - 
m.x1) + 3.64089546094324e-6*log(100 + 0.77* m.x256*(3115.6025 + m.x1)/(0.00118102171563895 + m.x256) - m.x1) + 8.62574003453462e-6*log(100 + 0.77*m.x257*(3115.6025 + m.x1)/(0.000184519541255673 + m.x257) - m.x1) + 7.63310161030727e-6*log( 100 + 0.77*m.x258*(3115.6025 + m.x1)/(5.40925585426253e-5 + m.x258) - m.x1) + 5.59271444872837e-6 *log(100 + 0.77*m.x259*(3115.6025 + m.x1)/(3.19463941807333e-5 + m.x259) - m.x1) + 1.73226762788398e-6*log(100 + 0.77*m.x260*(3115.6025 + m.x1)/(0.000106211617180695 + m.x260) - m.x1) + 4.10396015209512e-6*log(100 + 0.77*m.x261*(3115.6025 + m.x1)/(4.84968619353583e-5 + m.x261) - m.x1) + 4.86086182588105e-5*log(100 + 0.77*m.x262*(3115.6025 + m.x1)/( 5.63952009010913e-5 + m.x262) - m.x1) + 1.50558614237142e-5*log(100 + 0.77*m.x263*(3115.6025 + m.x1)/(0.000889039961177141 + m.x263) - m.x1) + 3.56692326648611e-5*log(100 + 0.77*m.x264*( 3115.6025 + m.x1)/(0.000153998985980601 + m.x264) - m.x1) + 9.41103618516721e-6*log(100 + 0.77* m.x265*(3115.6025 + m.x1)/(8.63199500184675e-5 + m.x265) - m.x1) + 2.22959306391542e-5*log(100 + 0.77*m.x266*(3115.6025 + m.x1)/(2.46295081463161e-5 + m.x266) - m.x1) + 4.35114650377943e-6*log( 100 + 0.77*m.x267*(3115.6025 + m.x1)/(0.000173710481863398 + m.x267) - m.x1) + 6.4209697517339e-7 *log(100 + 0.77*m.x268*(3115.6025 + m.x1)/(6.54429171754531e-5 + m.x268) - m.x1) + 4.7045935346946e-7*log(100 + 0.77*m.x269*(3115.6025 + m.x1)/(4.64359271237298e-5 + m.x269) - m.x1 ) + 1.4571848690874e-7*log(100 + 0.77*m.x270*(3115.6025 + m.x1)/(4.87525300074436e-5 + m.x270) - m.x1) + 3.4522550380804e-7*log(100 + 0.77*m.x271*(3115.6025 + m.x1)/(1.97501965793709e-5 + m.x271 ) - m.x1) + 3.94531616281732e-6*log(100 + 0.77*m.x272*(3115.6025 + m.x1)/(8.19553518254294e-5 + m.x272) - m.x1) + 1.22200831578924e-6*log(100 + 0.77*m.x273*(3115.6025 + m.x1)/( 0.000407989968975871 + m.x273) - m.x1) + 2.89509163321568e-6*log(100 + 0.77*m.x274*(3115.6025 + m.x1)/(6.27016276523772e-5 + m.x274) - m.x1) + 7.7429509727645e-7*log(100 + 
0.77*m.x275*( 3115.6025 + m.x1)/(5.03666298358569e-5 + m.x275) - m.x1) + 1.83440298713939e-6*log(100 + 0.77* m.x276*(3115.6025 + m.x1)/(1.27502988988239e-5 + m.x276) - m.x1) + 3.6588151666147e-7*log(100 + 0.77*m.x277*(3115.6025 + m.x1)/(2.8401873761961e-5 + m.x277) - m.x1) + 1.61629826545444e-6*log( 100 + 0.77*m.x278*(3115.6025 + m.x1)/(4.72929188044174e-5 + m.x278) - m.x1) + 5.0062656239579e-7* log(100 + 0.77*m.x279*(3115.6025 + m.x1)/(4.22570765661979e-5 + m.x279) - m.x1) + 1.18604735082027e-6*log(100 + 0.77*m.x280*(3115.6025 + m.x1)/(4.77954191731146e-5 + m.x280) - m.x1) + 3.1772767253625e-7*log(100 + 0.77*m.x281*(3115.6025 + m.x1)/(5.01843576053702e-5 + m.x281 ) - m.x1) + 7.5273658626615e-7*log(100 + 0.77*m.x282*(3115.6025 + m.x1)/(9.34980947428967e-5 + m.x282) - m.x1) + 1.5053891354575e-7*log(100 + 0.77*m.x283*(3115.6025 + m.x1)/( 3.36992156840782e-5 + m.x283) - m.x1) + 4.56290882737626e-6*log(100 + 0.77*m.x284*(3115.6025 + m.x1)/(3.38925031813217e-5 + m.x284) - m.x1) + 1.08101058154415e-5*log(100 + 0.77*m.x285*( 3115.6025 + m.x1)/(1.02759479703261e-5 + m.x285) - m.x1) + 2.12979630638549e-6*log(100 + 0.77* m.x286*(3115.6025 + m.x1)/(8.03244443885601e-5 + m.x286) - m.x1) + 1.17095129086209e-6*log(100 + 0.77*m.x287*(3115.6025 + m.x1)/(3.38851989402637e-6 + m.x287) - m.x1) + 1.8183738183868e-7*log( 193.364173093206 - 0.970033348896977*m.x1) + 8.3690639350379e-7*log(115.254735565526 - 0.995103760648052*m.x1) + 6.1319483935361e-7*log(176.825857180065 - 0.975341572880345*m.x1) + 1.8992880179492e-7*log(109.471992936955 - 0.996959819830368*m.x1) + 4.4996525907646e-7*log( 159.35999423299 - 0.980947507189062*m.x1) + 3.9818386942891e-7*log(291.087251996003 - 0.938667640690363*m.x1) + 2.9174623809521e-7*log(406.623520914578 - 0.901584518270679*m.x1) + 9.036444975734e-8*log(201.277457067861 - 0.967493460071411*m.x1) + 2.1408476092296e-7*log( 311.19442233872 - 0.932213938607791*m.x1) + 2.53568846505985e-6*log(283.883103312431 - 0.940979921760741*m.x1) + 
7.8539517293792e-7*log(112.566524343699 - 0.995966582918168*m.x1) + 1.86070011997164e-6*log(170.777277381972 - 0.97728295654469*m.x1) + 4.9093055416493e-7*log( 223.415319381483 - 0.960387976520919*m.x1) + 1.16307634663593e-6*log(483.165743746284 - 0.877017127908235*m.x1) + 2.2697933812219e-7*log(162.956712613236 - 0.979793085731175*m.x1) + 3.349525149487e-8*log(260.157811542521 - 0.948594914934585*m.x1) + 2.454170471418e-8*log( 319.709287061812 - 0.929480963293035*m.x1) + 7.60146450642e-9*log(310.18391133055 - 0.932538277482269*m.x1) + 1.800882969332e-8*log(559.688690006188 - 0.852455924654641*m.x1) + 2.0580903229556e-7*log(229.632746466765 - 0.958392398752163*m.x1) + 6.374656391292e-8*log( 127.215287247597 - 0.991264839706735*m.x1) + 1.5102363989344e-7*log(266.673373771106 - 0.946503646157972*m.x1) + 4.039142063785e-8*log(304.020979998872 - 0.934516364010212*m.x1) + 9.569238257287e-8*log(744.280921668034 - 0.793208240888228*m.x1) + 1.908635905751e-8*log( 439.474870738743 - 0.891040377988289*m.x1) + 8.431486050452e-8*log(316.086561642226 - 0.930643732105676*m.x1) + 2.611538951407e-8*log(339.269581293755 - 0.923202789414325*m.x1) + 6.187064545791e-8*log(314.017393171395 - 0.931307863191343*m.x1) + 1.657439407125e-8*log( 304.698766948572 - 0.934298817981892*m.x1) + 3.926681208795e-8*log(214.392130109138 - 0.96328410632963*m.x1) + 7.85292403475e-9*log(392.620228975356 - 0.9060790877606*m.x1) + 2.3802600639858e-7*log(391.153960662165 - 0.906549708872629*m.x1) + 5.6391359401217e-7*log( 850.86177688669 - 0.758999494676651*m.x1) + 1.1110169596417e-7*log(232.119855144384 - 0.957594123401691*m.x1) + 6.108315331197e-8*log(1491.69160837479 - 0.55331541543737*m.x1) + 1.75878843630284e-6*log(100 + 0.77*m.x288*(3115.6025 + m.x1)/(0.00320020232823025 + m.x288) - m.x1) + 8.09482226524927e-6*log(105.908941656468 - 0.998103435320627*m.x1) + 5.93101364389693e-6* log(100 + 0.77*m.x289*(3115.6025 + m.x1)/(0.00391700685565556 + m.x289) - m.x1) + 1.83705119893396e-6*log(103.663543949195 
- 0.998824129859571*m.x1) + 4.35220572579398e-6*log(100 + 0.77*m.x290*(3115.6025 + m.x1)/(0.00510766177101759 + m.x290) - m.x1) + 3.85135981387583e-6* log(100 + 0.77*m.x291*(3115.6025 + m.x1)/(0.00149732917979604 + m.x291) - m.x1) + 2.82186151553773e-6*log(100 + 0.77*m.x292*(3115.6025 + m.x1)/(0.000884304042641739 + m.x292) - m.x1) + 8.7403342304542e-7*log(140.116885035848 - 0.987123875707556*m.x1) + 2.07069524479848e-6* log(100 + 0.77*m.x293*(3115.6025 + m.x1)/(0.00134243541922925 + m.x293) - m.x1) + 2.45259775812781e-5*log(100 + 0.77*m.x294*(3115.6025 + m.x1)/(0.00156106832778344 + m.x294) - m.x1) + 7.59658951379296e-6*log(104.864298820897 - 0.998438729324137*m.x1) + 1.79972776848313e-5* log(100 + 0.77*m.x295*(3115.6025 + m.x1)/(0.00426282619236897 + m.x295) - m.x1) + 4.74843496404409e-6*log(100 + 0.77*m.x296*(3115.6025 + m.x1)/(0.00238941147254734 + m.x296) - m.x1) + 1.12496407961671e-5*log(100 + 0.77*m.x297*(3115.6025 + m.x1)/(0.000681766258152549 + m.x297) - m.x1) + 2.19541565728847e-6*log(100 + 0.77*m.x298*(3115.6025 + m.x1)/( 0.00480845758341298 + m.x298) - m.x1) + 3.2397662353331e-7*log(164.437067242418 - 0.979317943401824*m.x1) + 2.3737509868434e-7*log(189.824673314027 - 0.971169405174753*m.x1) + 7.352375918346e-8*log(185.708911713758 - 0.972490421446973*m.x1) + 1.7418707361316e-7*log( 301.02255994106 - 0.935478752523449*m.x1) + 1.99064979064228e-6*log(100 + 0.77*m.x299*(3115.6025 + m.x1)/(0.00226859558939085 + m.x299) - m.x1) + 6.1657684646796e-7*log(110.574382290427 - 0.996605991203812*m.x1) + 1.46074821781472e-6*log(100 + 0.77*m.x300*(3115.6025 + m.x1)/( 0.00173563571837019 + m.x300) - m.x1) + 3.9067854379205e-7*log(183.057292642768 - 0.973341498909836*m.x1) + 9.2556686754731e-7*log(100 +
# <filename>lib/trainer.py
import os
import time
import glob
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import get_latest_file, iterate_minibatches, check_numpy, process_in_chunks
from .nn_utils import to_one_hot
from collections import OrderedDict
from copy import deepcopy
from tensorboardX import SummaryWriter
from apex import amp
import json
from os.path import join as pjoin, exists as pexists
import argparse
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from sklearn.metrics import roc_auc_score, log_loss

from . import nn_utils, arch


class Trainer(nn.Module):
    def __init__(self, model, experiment_name=None, warm_start=False,
                 Optimizer=torch.optim.Adam, optimizer_params={}, lr=0.01,
                 lr_warmup_steps=-1, verbose=False, n_last_checkpoints=1,
                 step_callbacks=[], fp16=0, problem='classification',
                 pretraining_ratio=0.15, masks_noise=0.1,
                 opt_only_last_layer=False, freeze_steps=0, **kwargs):
        # NOTE(review): optimizer_params={} and step_callbacks=[] are mutable
        # default arguments shared across instances — safe only if callers
        # never mutate them in place; confirm before refactoring.
        """
        Training harness: wraps a model with an optimizer, loss selection,
        checkpointing and (optional) apex fp16 support.

        :type model: torch.nn.Module
        :param experiment_name: a path (under logs/) where all checkpoints are saved
        :param warm_start: when set to True, loads the last checkpoint
        :param Optimizer: function(parameters) -> optimizer
        :param optimizer_params: extra keyword arguments forwarded to Optimizer
        :param lr: learning rate passed to the optimizer
        :param lr_warmup_steps: warm-up step count (-1 disables; stored only here)
        :param verbose: when set to True, produces logging information
        :param n_last_checkpoints: how many temp checkpoints to keep around
        :param step_callbacks: callables invoked with the current step (e.g. on load)
        :param fp16: nonzero enables apex amp mixed precision (opt_level O1)
        :param problem : str
            Problem type. Chosen from ['classification', 'regression', 'pretrain']
        :param pretraining_ratio : float
            Between 0 and 1, percentage of feature to mask for reconstruction.
            Only used when problem == 'pretrain'
        :param masks_noise: noise ratio applied to pretraining masks
        :param opt_only_last_layer: optimize only model.last_w instead of all params
        :param freeze_steps: steps to keep (part of) the model frozen; 0 when pretraining
        """
        super().__init__()
        self.model = model
        self.verbose = verbose
        self.lr = lr
        self.lr_warmup_steps = lr_warmup_steps

        # When using fp16, params not filtered out by requires_grad
        # will produce an error in the optimizer/amp setup.
        params = [p for p in self.model.parameters() if p.requires_grad]
        if opt_only_last_layer:
            print('Only optimize last layer!')
            params = [self.model.last_w]
        self.opt = Optimizer(params, lr=lr, **optimizer_params)
        self.step = 0
        self.n_last_checkpoints = n_last_checkpoints
        self.step_callbacks = step_callbacks
        self.fp16 = fp16
        self.problem = problem
        self.pretraining_ratio = pretraining_ratio
        self.masks_noise = masks_noise
        self.opt_only_last_layer = opt_only_last_layer
        self.freeze_steps = freeze_steps
        if problem.startswith('pretrain'):
            # Don't do freeze when pretraining
            self.freeze_steps = 0

        # Select the loss by problem type.
        if problem == 'classification':
            # In my datasets I only have binary classification
            self.loss_function = \
                (lambda x, y: F.binary_cross_entropy_with_logits(x, y.float()))
        elif problem == 'regression':
            self.loss_function = F.mse_loss
        elif problem.startswith('pretrain'):
            # Not used
            self.loss_function = None
        else:
            raise NotImplementedError()

        if experiment_name is None:
            # Auto-name from the current UTC time (year.month.day_hour:minute).
            experiment_name = 'untitled_{}.{:0>2d}.{:0>2d}_{:0>2d}:{:0>2d}'.format(*time.gmtime()[:5])
            if self.verbose:
                print('using automatic experiment name: ' + experiment_name)
        self.experiment_path = pjoin('logs/', experiment_name)
        # if not warm_start and experiment_name != 'debug':
        #     assert not os.path.exists(self.experiment_path), 'experiment {} already exists'.format(experiment_name)
        # self.writer = SummaryWriter(self.experiment_path, comment=experiment_name)
        if fp16:
            self.model, self.opt = amp.initialize(
                self.model, self.opt, opt_level='O1')
        if warm_start:
            self.load_checkpoint()

    def save_checkpoint(self, tag=None, path=None, mkdir=True, **kwargs):
        """Persist model/optimizer/step (and amp state when fp16) to disk.

        Exactly one of tag/path may be given; with neither, a step-based
        temp tag is used. Returns the written path.
        NOTE(review): fragment — the body continues on the next chunk line.
        """
        assert tag is None or path is None, "please provide either tag or path or nothing, not both"
        if tag is None and path is None:
            tag =
"temp_{}".format(self.step) if path is None: path = pjoin(self.experiment_path, "checkpoint_{}.pth".format(tag)) if mkdir: os.makedirs(os.path.dirname(path), exist_ok=True) # Sometimes happen there is a checkpoint already existing. Then overwrite! if pexists(path): os.remove(path) torch.save(OrderedDict([ ('model', self.model.state_dict(**kwargs)), ('opt', self.opt.state_dict()), ('step', self.step), ] + ([] if not self.fp16 else [('amp', amp.state_dict())])), path) if self.verbose: print("Saved " + path) return path def load_checkpoint(self, tag=None, path=None, **kwargs): assert tag is None or path is None, "please provide either tag or path or nothing, not both" if tag is None and path is None: path = self.get_latest_file(pjoin(self.experiment_path, 'checkpoint_temp_[0-9]*.pth')) if path is None: return self elif tag is not None and path is None: path = pjoin(self.experiment_path, "checkpoint_{}.pth".format(tag)) checkpoint = torch.load(path) self.model.load_state_dict(checkpoint['model'], **kwargs) self.opt.load_state_dict(checkpoint['opt']) self.step = int(checkpoint['step']) if self.fp16 and 'amp' in checkpoint: amp.load_state_dict(checkpoint['amp']) # Set the temperature for c in self.step_callbacks: c(self.step) if self.verbose: print('Loaded ' + path) return self def get_latest_file(self, pattern): path = get_latest_file(pattern) if path is None: return None # File not saved correctly if os.stat(path).st_size == 0 \ or len(glob.glob(pattern)) > self.n_last_checkpoints: os.remove(path) path = self.get_latest_file(pattern) return path def average_checkpoints(self, tags=None, paths=None, out_tag='avg', out_path=None): assert tags is None or paths is None, "please provide either tags or paths or nothing, not both" assert out_tag is not None or out_path is not None, "please provide either out_tag or out_path or both, not nothing" if tags is None and paths is None: paths = self.get_latest_checkpoints( pjoin(self.experiment_path, 'checkpoint_temp_[0-9]*.pth'), 
self.n_last_checkpoints) elif tags is not None and paths is None: paths = [pjoin(self.experiment_path, 'checkpoint_{}.pth'.format(tag)) for tag in tags] checkpoints = [torch.load(path) for path in paths] averaged_ckpt = deepcopy(checkpoints[0]) for key in averaged_ckpt['model']: values = [ckpt['model'][key] for ckpt in checkpoints] averaged_ckpt['model'][key] = sum(values) / len(values) if out_path is None: out_path = pjoin(self.experiment_path, 'checkpoint_{}.pth'.format(out_tag)) torch.save(averaged_ckpt, out_path) def get_latest_checkpoints(self, pattern, n_last=None): list_of_files = glob.glob(pattern) if len(list_of_files) == 0: return [] assert len(list_of_files) > 0, "No files found: " + pattern return sorted(list_of_files, key=os.path.getctime, reverse=True)[:n_last] def remove_old_temp_checkpoints(self, number_ckpts_to_keep=None): if number_ckpts_to_keep is None: number_ckpts_to_keep = self.n_last_checkpoints paths = self.get_latest_checkpoints(pjoin(self.experiment_path, 'checkpoint_temp_[0-9]*.pth')) paths_to_delete = paths[number_ckpts_to_keep:] for ckpt in paths_to_delete: os.remove(ckpt) def train_on_batch(self, *batch, device, update=True): # Tune temperature in choice function for c in self.step_callbacks: c(self.step) # Tune the learning rate if self.lr_warmup_steps > 0 and self.step < self.lr_warmup_steps: cur_lr = self.lr * (self.step + 1) / self.lr_warmup_steps self.set_lr(cur_lr) if self.freeze_steps > 0 and self.step == 0 and update: self.model.freeze_all_but_lastw() if 0 < self.freeze_steps == self.step: self.model.unfreeze() x_batch, y_batch = batch x_batch = torch.as_tensor(x_batch, device=device) if not self.problem.startswith('pretrain'): # Save some memory y_batch = torch.as_tensor(y_batch, device=device) self.model.train() # Read that it's faster... 
for group in self.opt.param_groups: for p in group['params']: p.grad = None # self.opt.zero_grad() if not self.problem.startswith('pretrain'): # Normal training logits, penalty = self.model(x_batch, return_outputs_penalty=True) loss = self.loss_function(logits, y_batch).mean() else: x_masked, masks, masks_noise = self.mask_input(x_batch) feature_masks = masks_noise if self.problem == 'pretrain_recon2' else None outputs, penalty = self.model(x_masked, return_outputs_penalty=True, feature_masks=feature_masks) loss = self.pretrain_loss(outputs, masks, x_batch) loss += penalty if self.fp16: with amp.scale_loss(loss, self.opt) as scaled_loss: scaled_loss.backward() else: loss.backward() if update: self.opt.step() self.step += 1 # self.writer.add_scalar('train loss', loss.item(), self.step) return {'loss': loss.item()} def mask_input(self, x_batch): masks = torch.bernoulli( self.pretraining_ratio * torch.ones(x_batch.shape) ).to(x_batch.device) infills = 0. # if self.problem == 'pretrain_mask': # # Use marginal dist (Gaussian) to in-fill. # infills = torch.normal(0, 1, size=masks.shape).to(x_batch.device) # To make it more difficult, 10% of the time we do not mask the inputs! # Similar to BERT tricks. new_masks = masks if self.masks_noise > 0.: new_masks = torch.bernoulli((1. - self.masks_noise) * masks) x_batch = (1. - new_masks) * x_batch + new_masks * infills return x_batch, masks, new_masks def pretrain_loss(self, outputs, masks, targets): if self.problem.startswith('pretrain_recon'): nb_masks = torch.sum(masks, dim=1, keepdim=True) nb_masks[nb_masks == 0] = 1 loss = (((outputs - targets) * masks) ** 2) / nb_masks loss = torch.mean(loss) # elif self.problem == 'pretrain_mask': # # BCE loss to predict if that token is the mask. And set target as 0.9 # loss = F.binary_cross_entropy_with_logits( # outputs, (1. 
- self.masks_noise) * masks) else: raise NotImplementedError('Unknown problem: ' + self.problem) return loss def evaluate_pretrain_loss(self, X_test, y_test, device, batch_size=4096): X_test = torch.as_tensor(X_test, device=device) self.model.train(False) with torch.no_grad(): if self.problem.startswith('pretrain_recon'): # no mask outputs = process_in_chunks(self.model, X_test, batch_size=batch_size) loss = (((outputs - X_test)) ** 2) loss = torch.mean(loss) # elif self.problem == 'pretrain_mask': # X_masked, masks, _ = self.mask_input(X_test) # outputs = process_in_chunks(self.model, X_masked, batch_size=batch_size) # loss = self.pretrain_loss(outputs, masks, X_test) else: raise NotImplementedError('Unknown problem: ' + self.problem) return loss.item() def evaluate_classification_error(self, X_test, y_test, device, batch_size=4096): ''' This is for evaluation of binary error ''' X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): logits = process_in_chunks(self.model, X_test, batch_size=batch_size) logits = check_numpy(logits) error_rate = (y_test != (logits >= 0)).mean() # error_rate = (y_test != np.argmax(logits, axis=1)).mean() return error_rate def evaluate_negative_auc(self, X_test, y_test, device, batch_size=4096): X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): logits = process_in_chunks(self.model, X_test, batch_size=batch_size) logits = check_numpy(logits) # assert logits.shape[1] == 2, 'logits shape is not binary! 
%d' % logits.shape[1] # logit_diff = logits[:, 1] - logits[:, 0] auc = roc_auc_score(y_test, logits) return -auc def evaluate_mse(self, X_test, y_test, device, batch_size=4096): X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): prediction = process_in_chunks(self.model, X_test, batch_size=batch_size) prediction = check_numpy(prediction) error_rate = ((y_test - prediction) ** 2).mean() error_rate = float(error_rate) # To avoid annoying JSON unserializable bug return error_rate def evaluate_multiple_mse(self, X_test, y_test, device, batch_size=4096): X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): prediction = process_in_chunks(self.model, X_test, batch_size=batch_size) prediction = check_numpy(prediction) error_rate = ((y_test - prediction) ** 2).mean(axis=0) return error_rate.astype(float).tolist() def evaluate_logloss(self, X_test, y_test, device, batch_size=512): X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): logits = F.softmax(process_in_chunks(self.model, X_test, batch_size=batch_size), dim=1) logits =
import os
import sys
import numpy as np
import pickle

from datetime import date

from gtracr.lib._libgtracr import TrajectoryTracer, uTrajectoryTracer
from gtracr.lib.trajectory_tracer import pTrajectoryTracer
from gtracr.utils import particle_dict, location_dict, ymd_to_dec
from gtracr.lib.constants import EARTH_RADIUS, DEG_PER_RAD, ELEMENTARY_CHARGE, KG_PER_GEVC2, RAD_PER_DEG, KG_M_S_PER_GEVC, ELEMENTARY_CHARGE

# Directory that holds the IGRF coefficient data shipped with the package.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(CURRENT_DIR, "data")


class Trajectory:
    '''
    Class that controls the trajectory of a particle at some given energy / rigidity

    Required Parameters
    -------------------
    - zenith_angle : float
        the angle of the cosmic ray trajectory from the local zenith,
        with 0 being at the local zenith
    - azimuth_angle : float
        the angle of the cosmic ray trajectory with 0 being in the direction
        of the geographic North in the local tangent plane
    - energy : float
        the cosmic ray energy. Cannot be used concurrently with rigidity
        (default = None).
    - rigidity : float
        the cosmic ray rigidity. Cannot be used concurrently with energy
        (default = None).

    Optional Parameters
    --------------------
    - particle_altitude : float
        the altitude in which the cosmic ray hits Earth's atmosphere
        and creates showers (default = 100km)
    - latitude : float
        the geographic latitude of the detector, with 0 defined at the
        equator in degrees
    - longitude : float
        the geographic longitude of the detector, with 0 defined at the
        Prime Meridian in degrees
    - detector_altitude : float
        the height of the detector from sea level in km (default = 0km)
    - location_name : str
        the location name as stored in location_dict (default = None).
        Available as an alternative option to initialize the location
        of the trajectory.
    - bfield_type : str
        the type of bfield to evaluate the trajectory with
        (either 'dipole' or 'igrf', default = igrf)
    - date : str
        the date in which the field is evaluated in
        (defaults to the current date). Date must be formatted
        in "yyyy-mm-dd" format.
    - plabel : str
        the label of the particle defined in particle_dict (default = "p+").
        Available options are "p+", "p-", "e+", "e-".
    - escape_altitude : float
        the altitude in which the particle has "escaped" Earth
        in meters (default = 10 * RE)
    '''

    def __init__(self,
                 zenith_angle,
                 azimuth_angle,
                 energy=None,
                 rigidity=None,
                 particle_altitude=100.,
                 latitude=0.,
                 longitude=0.,
                 detector_altitude=0.,
                 location_name=None,
                 bfield_type="igrf",
                 date=str(date.today()),
                 plabel="p+",
                 escape_altitude=10. * EARTH_RADIUS):
        # NOTE(review): the `date` default is evaluated once at import time,
        # not per call — confirm this is intended for long-running processes.
        '''
        Cosmic ray direction configurations
        '''
        self.zangle = zenith_angle
        self.azangle = azimuth_angle
        self.palt = particle_altitude * (1e3)  # convert to meters
        self.esc_alt = escape_altitude
        '''
        Particle type configuration
        '''
        # define particle from particle_dict
        self.particle = particle_dict[plabel]
        self.charge = self.particle.charge
        self.mass = self.particle.mass
        '''
        Geodesic coordinate configuration
        '''
        # only import location dictionary and use those values if location_name is not None
        if location_name is not None:
            # location_dict = set_locationdict()
            loc = location_dict[location_name]
            latitude = loc.latitude
            longitude = loc.longitude
            detector_altitude = loc.altitude

        self.lat = latitude
        self.lng = longitude
        self.dalt = detector_altitude * (1e3)  # convert to meters
        # altitude at which integration starts: detector + shower altitude
        self.start_alt = self.dalt + self.palt
        '''
        Cosmic ray energy / rigidity / momentum configuration
        '''
        # define rigidity and energy only if they are provided, evaluate for the other member
        # also set momentum in each case
        if rigidity is None and energy is not None:
            self.particle.set_from_energy(energy)
            self.rigidity = self.particle.rigidity
            self.energy = energy
        elif energy is None and rigidity is not None:
            self.particle.set_from_rigidity(rigidity)
            self.rigidity = rigidity
            self.energy = self.particle.get_energy_rigidity()
        # elif rigidity is None and energy is None:
        else:
            # NOTE(review): this branch also fires when BOTH are provided;
            # the message covers that case, the "neither" case message is
            # misleading — confirm intended wording.
            raise Exception(
                "Provide either energy or rigidity as input, not both!")
        '''
        Magnetic Field Model configuration
        '''
        # type of bfield to use
        # take only first character for compatibility with char in c++
        self.bfield_type = bfield_type[0]

        # find the path to the data and set current date for igrf bfield
        datapath = os.path.abspath(DATA_DIR)
        # print(datapath)
        dec_date = ymd_to_dec(date)
        self.igrf_params = (datapath, dec_date)
        '''
        Other set-ups
        '''
        self.particle_escaped = False  # check if trajectory is allowed or not

        # final time and six-vector, used for testing purposes
        self.final_time = 0.
        self.final_sixvector = np.zeros(6)

        # get the 6-vector for the particle, initially defined in
        # detector frame, and transform it to geocentric
        # coordinates
        self.particle_sixvector = self.detector_to_geocentric()

    def get_trajectory(self,
                       dt=1e-5,
                       max_time=1,
                       max_step=None,
                       get_data=False,
                       use_python=False,
                       use_unvectorized=False):
        '''
        Evaluate the trajectory of the particle within Earth's magnetic field
        and determines whether particle has escaped or not. Optionally also
        returns the information of the trajectory (the duration and the
        six-vector in spherical coordinates) if `get_data == True`.

        Parameters
        ----------
        dt : float
            the time step between each iteration of the integration
            (default: 1e-5)
        max_time : float
            the maximum duration in which the integration would occur
            in seconds (default: 10)
        max_step : int, optional
            maximum number of steps to integrate for (default None).
            If `max_step` is not `None`, then `max_step` will override
            the evaluation of maximum number of steps based on `max_time`.
        get_data : bool, optional
            decides whether we want to extract the information
            (time and six vector) for the whole trajectory for e.g.
            debugging purposes (default: False)
        use_python : bool, optional
            decides whether to use the python implementation for the
            TrajectoryTracer class instead of that implemented in C++.
            This is mainly enabled for debugging purposes (default: False)
        use_unvectorized : bool, optional
            decides whether to evaluate the Runge Kutta integration in the
            C++ version in its unvectorized or vectorized form. This is
            mainly enabled for debugging purposes (default: False)

        Returns
        --------
        - trajdata_dict : dict
            a dictionary that contains the information of the whole
            trajectory in spherical coordinates. Keys are
            ["t", "r", "theta", "phi", "pr", "ptheta", "pphi"]
            - only returned when `get_data` is True
        '''
        # evaluate max_step only when max_time is given, else use the user-given
        # max step
        max_step = int(np.ceil(max_time / dt)) if max_step is None else max_step

        # raise issues if both use python and use unvectorized form is True
        if use_python and use_unvectorized:
            raise Exception("Unvectorized Python version does not exist!")

        # start iteration process
        # NOTE(review): these in-place conversions mutate instance state, so a
        # second call to get_trajectory() would convert charge/mass twice —
        # confirm this method is intended to be called only once per instance.
        self.charge *= ELEMENTARY_CHARGE
        self.mass *= KG_PER_GEVC2

        # initialize trajectory tracer
        if use_python:
            # the python trajectory tracer version
            traj_tracer = pTrajectoryTracer(self.charge, self.mass,
                                            self.start_alt, self.esc_alt, dt,
                                            max_step, self.bfield_type,
                                            self.igrf_params)
        elif use_unvectorized:
            # the unvectorized trajectory tracer version
            # error prone, possible memory leaks so better not to use it
            traj_tracer = uTrajectoryTracer(self.charge, self.mass,
                                            self.start_alt, self.esc_alt, dt,
                                            max_step, self.bfield_type,
                                            self.igrf_params)
        else:
            # the vectorized trajectory tracer version
            traj_tracer = TrajectoryTracer(self.charge, self.mass,
                                           self.start_alt, self.esc_alt, dt,
                                           max_step, self.bfield_type,
                                           self.igrf_params)

        # set initial values
        particle_t0 = 0.
        particle_vec0 = self.particle_sixvector

        if get_data:
            # evaluate the trajectory tracer
            # get data dictionary of the trajectory
            trajectory_datadict = traj_tracer.evaluate_and_get_trajectory(
                particle_t0, particle_vec0)

            # convert all data to numpy arrays for computations etc
            # this should be done within C++ in future versions
            for key, arr in list(trajectory_datadict.items()):
                trajectory_datadict[key] = np.array(arr)

            # add the Cartesian components to the dictionary
            # for plotting purposes
            self.convert_to_cartesian(trajectory_datadict)

            # lastly get the boolean of if the particle has escaped or not
            # in binary format
            # this helps with the geomagnetic cutoff procedure
            # alternatively this can be inside the geomagnetic things
            self.particle_escaped = traj_tracer.particle_escaped

            # set the final time and six-vector from the evaluator
            self.final_time = traj_tracer.final_time
            self.final_sixvector = np.array(traj_tracer.final_sixvector)

            return trajectory_datadict

        else:
            # simply evaluate without returning the dictionary
            traj_tracer.evaluate(particle_t0, particle_vec0)

            # lastly get the boolean of if the particle has escaped or not
            # in binary format
            # this helps with the geomagnetic cutoff procedure
            # alternatively this can be inside the geomagnetic things
            self.particle_escaped = traj_tracer.particle_escaped

            # set the final time and six-vector from the evaluator
            self.final_time = traj_tracer.final_time
            self.final_sixvector = np.array(traj_tracer.final_sixvector)

            return None

    def convert_to_cartesian(self, trajectory_data):
        """Append Cartesian components (keys "x", "y", "z", in units of
        Earth radii) to `trajectory_data`, derived from its spherical
        "r", "theta", "phi" arrays. Mutates the dict in place."""
        r_arr = trajectory_data["r"] / EARTH_RADIUS
        theta_arr = trajectory_data["theta"]
        phi_arr = trajectory_data["phi"]

        # convert to cartesian & add to dict
        trajectory_data["x"] = r_arr * np.sin(theta_arr) * np.cos(phi_arr)
        trajectory_data["y"] = r_arr * np.sin(theta_arr) * np.sin(phi_arr)
        trajectory_data["z"] = r_arr * np.cos(theta_arr)

    # get the initial trajectory points based on the latitude, longitude, altitude, zenith, and azimuth
    # returns tuple of 2 trajectory points (the initial one and the first one relating to that of the zenith and azimuth one)
    def detector_to_geocentric(self):
        '''
        Convert the coordinates defined in the detector
        '''
        # NOTE(review): source chunk is truncated here; the body of this
        # method is not visible in this file view.
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 24 16:52:00 2017

@author: Paul
"""
import numpy as np
from skimage import measure
from collections import defaultdict
import PySkelFrac.classes as c
from scipy import ndimage
import copy
import time
import cv2


def NewAssociatedContours(AllContours,AllArcs,AllPlaces,AllVoies,p,Exterior=False):
    """ For every Contours, we look at the other structures associated

    Cross-links the four skeleton structures (contours, arcs, places, ways):
    builds contour<->arc<->way adjacency, re-indexes everything, orients each
    way away from the root point p['Pied'], and derives mother/daughter and
    killer/killed relations between ways. All inputs are mutated in place and
    also returned.
    """
    print('\n### Associations between classes  ###  ###  ###  ###')
    #print('Association of Ways with contours...')
    t=time.time()
    # PTCont contains all Contours linked to the point
    #print('All Contours Linked to a point')
    PtCont=defaultdict(list)
    for S in AllArcs.Segments:
        # NOTE(review): np.int is removed in NumPy>=1.24 — confirm the pinned
        # NumPy version, otherwise this raises AttributeError.
        PtCont[int(S[0])].extend(S[2:4].astype(np.int))
        PtCont[int(S[1])].extend(S[2:4].astype(np.int))
    for key,value in PtCont.items():
        PtCont[key]=list(set(value))
    #Now for every arcs we want to do the list of Contours around it
    #print('All Contours linked to an arc')
    for i,Al in enumerate(AllArcs.Arclist):
        for Pt in Al[1:-1] :
            AllArcs.list[i].Contours.extend(PtCont[Pt])
    for i,A in enumerate(AllArcs.list):
        A.Contours=list(set(A.Contours))
        for C in A.Contours:
            AllContours.list[C].Arcs.append(i)
    #Now for every voie we to the list of Contours around it
    #print('All Contours linked to a voie')
    for V in AllVoies.list:
        for A in V.Arc:
            V.Contours.extend(AllArcs.list[A].Contours)
    for i,V in enumerate(AllVoies.list):
        V.Contours=list(set(V.Contours))
        for C in V.Contours:
            AllContours.list[C].Voies.append(i)
    #print('All theses link to the contours')
    for i,C in enumerate(AllContours.list):
        for A in C.Arcs:
            if min(AllArcs.list[A].Connectivitypts)>=2:
                AllContours.list[i].Points.extend(AllArcs.list[A].Vertices)
            else :
                AllContours.list[i].ArcsInside.append(A)
    #print('Calculation of new contours')
    #for i,C in enumerate(AllContours.list):
        #if C.Points:
        #    if (i==AllContours.labmax and Exterior):
        #        AllContours.list[i].Points=ReorganizePolygon(C.Points,AllArcs.Vertices)
        #    elif i!=AllContours.labmax:
        #        AllContours.list[i].Points=ReorganizePolygon(C.Points,AllArcs.Vertices)
        #    AllContours.list[i].PointsXY=AllArcs.Vertices[ AllContours.list[i].Points,0:2]
        #else:
            #AllContours.list[i].Points=[]
            #AllContours.list[i].PointsXY=[]
    # Reset the adjacency lists before rebuilding them below.
    for A in AllArcs .list: A.Contours=[]
    # NOTE(review): `C.Voies=[]` here only clears the loop-leftover variable C
    # repeatedly; it looks like it should be `V.Voies=[]` — confirm intent.
    for V in AllVoies .list: C.Voies  =[]
    for i,V in enumerate(AllVoies.list):
        V.VoiesLink=[]
        V.ArcsLink =[]
        for Pid in V.Places:
            P=AllPlaces.list[Pid]
            for E in P.Extremities:
                V.VoiesLink.append(AllArcs.list[E[0]].Usedinvoie)
                V.ArcsLink. append(E[0])
            P.Voie=i
    for i,C in enumerate(AllContours.list):
        for Aid in C.Arcs:
            C.Voies.append(AllArcs.list[Aid].Usedinvoie)
            AllArcs.list[Aid].Contours.append(i)
        C.Voies=list(set(C.Voies))
    # Re-index every structure after the rebuild.
    for i,C in enumerate(AllContours.list) : C.index=i
    for i,A in enumerate(AllArcs    .list) : A.index=i
    for i,P in enumerate(AllPlaces  .list) : P.index=i
    for i,V in enumerate(AllVoies   .list) : V.index=i
    ### CORRECT WAY IN THE PLACE
    for P in AllPlaces.list:
        if (len(P.Arcs)>2 and len(P.Links)):
            ind1 = AllArcs.list[P.Links[0][0]].Usedinvoie
            ind2 = AllArcs.list[P.Links[0][1]].Usedinvoie
            if ind1==ind2 :
                P.Voie = ind1
            else :
                print("Place", P.index, "has a ill defined way")
    ### RIGHT DIRECTION (WAYS TO BRANCHES )
    for i,V in enumerate(AllVoies.list):
        # Orient each way so its first point is the one closest to the root
        # point p['Pied']; reverse all per-way sequences when flipping.
        if ((V.FirstPt[0]-p['Pied'][0])**2 + (V.FirstPt[1]-p['Pied'][1])** 2) > (( V.LastPt[0]-p['Pied'][0])**2 + ( V.LastPt[1]-p['Pied'][1])** 2):
            V.lecture=-np.array(V.lecture)[::-1]
            V.Arc= V.Arc[::-1]
            V.Vertices = V.Vertices[::-1]
            V.XY = V.XY[::-1,:]
            V.FirstPlace, V.LastPlace = V.LastPlace, V.FirstPlace
            V.FirstPt  , V.LastPt   = V.LastPt  , V.FirstPt
        # Drop stale per-way attributes that will be recomputed elsewhere.
        del V.Arret, V.Arretes, V.Begin, V.CurvMax, V.Curvature, V.Filles, V.Heredite, V.Mere, V.VoiesNeighbor, V.FirstAng, V.LastAng
        ### CORRECT FIRST PLACE AND LASTPLACE
        A = AllArcs.list[V.Arc[0]]
        if AllPlaces.list[A.FirstPlace].Voie != i :
            V.FirstPlace=A.FirstPlace
        else :
            V.FirstPlace=A.LastPlace
        if len(AllPlaces.list[A.FirstPlace].Arcs)==1 : V.FirstPlace=A.FirstPlace
        if len(AllPlaces.list[A. LastPlace].Arcs)==1 : V.FirstPlace=A. LastPlace
        A = AllArcs.list[V.Arc[-1]]
        if AllPlaces.list[A.FirstPlace].Voie != i :
            V.LastPlace=A.FirstPlace
        else :
            V.LastPlace=A.LastPlace
        if len(AllPlaces.list[A.FirstPlace].Arcs)==1 : V.LastPlace=A.FirstPlace
        if len(AllPlaces.list[A. LastPlace].Arcs)==1 : V.LastPlace=A. LastPlace
        ### CORRECT VOIESLINKED TO REMOVE EXTREME PLACES
        V.ArcsLink  = list(set([A for A in V.ArcsLink if A not in V.Arc]))  ### links to daughter/dead ways (all)
        V.VoiesLink = list(set([AllArcs.list[Ai].Usedinvoie for Ai in V.ArcsLink]))  ### links to daughter/dead ways (all)
        V.VoiesExtremes = [AllArcs.list[Ai[0]].Usedinvoie for Ai in AllPlaces.list[V.FirstPlace].Arcs]+[AllArcs.list[Ai[0]].Usedinvoie for Ai in AllPlaces.list[V.LastPlace].Arcs]
        V.VoiesIn = [V2 for V2 in V.VoiesLink if V2 not in V.VoiesExtremes ]
        # Mother = way owning the first place; Killer = way owning the last.
        if V.index==AllPlaces.list[V.FirstPlace].Voie:
            V.Mother = False
        else :
            V.Mother=AllPlaces.list[V.FirstPlace].Voie
        if V.index==AllPlaces.list[V.LastPlace ].Voie:
            V.Killer = False
        else :
            V.Killer=AllPlaces.list[V.LastPlace ].Voie
        V.Killed   = []
        V.Daughter = []
    for V in AllVoies.list :
        # NOTE(review): a mother/killer index of 0 is falsy and is skipped
        # here exactly like False — confirm way 0 can never be a mother/killer.
        if V.Mother : AllVoies.list[V.Mother].Daughter.append(V.index)
        if V.Killer : AllVoies.list[V.Killer].Killed  .append(V.index)
    print('Done. t=',time.time()-t)
    return AllContours,AllArcs,AllPlaces,AllVoies


def RegulatedContours2(IMG,AllContours,AllArcs,AllPlaces,AllVoies,p):
    """Rasterize the ways twice — at thin (2 px) and regulated
    (p['branchwidth']) line width — and extract the contour sets of both
    rasters. Returns (IMG, ContoursNowidth, ContoursRegu, regulated_binary).
    """
    NOWIDTH=c.Void()
    NOWIDTH.binary=np.zeros((IMG.Y,IMG.X,3),dtype=np.uint8)
    for A in AllVoies.list:
        pts=np.vstack(( A.XY[:,0], A.XY[:,1])).T.reshape(-1,2).astype(np.int32)
        cv2.polylines(NOWIDTH.binary, [pts], 0, (255,255,255),2)
    NOWIDTH.binary = NOWIDTH.binary[:,:,0].T  # map without distance weighting
    #IMG.DistNoborder = cv2.distanceTransform(NOWIDTH.binary,cv2.DIST_L2,3)  # distance inside the structure
    ContoursNowidth= c.Contours(NOWIDTH,p)  # associated contours
    #IMG.HolesNoWidth = ((255-NOWIDTH.binary)*IMG.enveloppe).astype(np.uint8)  # holes only
    #IMG.HolesDistNoWidth = cv2.distanceTransform(IMG.HolesNoWidth,cv2.DIST_L2,3)  # distance inside the holes
    #IMG.NoWidth= NOWIDTH.binary
    ### build the regulated (constant-width) raster
    REGU=c.Void()
    REGU.binary=np.zeros((IMG.Y,IMG.X,3),dtype=np.uint8)
    for A in AllVoies.list:
        pts=np.vstack(( A.XY[:,0], A.XY[:,1])).T.reshape(-1,2).astype(np.int32)
        cv2.polylines(REGU.binary, [pts], 0, (255,255,255),int(p['branchwidth']))
    REGU.binary = REGU.binary[:,:,0].T
    # <NAME>
    #IMG.DistRegu = cv2.distanceTransform(NOWIDTH.binary,cv2.DIST_L2,3)  # distance inside the structure
    ContoursRegu = c.Contours(REGU ,p)  # associated contours
    #IMG.HolesRegu= ((255-REGU.binary)*IMG.enveloppe).astype(np.uint8)  # holes only
    #IMG.HolesDistRegu = cv2.distanceTransform(IMG.HolesRegu,cv2.DIST_L2,3)  # distance inside the holes
    #IMG.Regu = REGU.binary
    """
    for _ in range()
    AllContours.C_XY      = [ C.Center for C in AllContours.list     if not C.isexterior]
    ContoursRegu.C_XY     = [ C.Center for C in ContoursRegu.list    if not C.isexterior]
    ContoursNowidth.C_XY  = [ C.Center for C in ContoursNowidth.list if not C.isexterior]
    from scipy.spatial import distance_matrix
    ALLDIST1 = distance_matrix(AllContours.C_XY,ContoursRegu.C_XY )
    ALLDIST2 = distance_matrix(AllContours.C_XY,ContoursNowidth.C_XY )
    ALLDIST3 = distance_matrix(ContoursRegu.C_XY,ContoursNowidth.C_XY )
    MIN1 = np.argmin(ALLDIST1, axis=1) # Donne pour chaque trou de
    MIN11= np.argmin(ALLDIST1, axis=0)
    MIN2 = np.argmin(ALLDIST2, axis=1)
    MIN22= np.argmin(ALLDIST2, axis=0)
    for C in AllContours.list :
        C.Regcontours = False
        C.NoWContours = False
    for C in ContoursRegu.list    : C.Taken = False
    for C in ContoursNowidth.list : C.Taken = False
    for i,val in enumerate(MIN1[MIN11]-np.linspace(0,len(MIN11)-1,len(MIN11))) :
        if val==0. :
            AllContours.list[i+1].RegContours= ContoursRegu.list[ MIN1[i]]
            ContoursRegu.list[ MIN1[i]].Taken = True
    for i,val in enumerate(MIN2[MIN22]-np.linspace(0,len(MIN22)-1,len(MIN22))) :
        if val==0. :
            AllContours.list[i+1].NoWcontours= ContoursNowidth.list[ MIN2[i]]
            ContoursNowidth.list[ MIN2[i]].Taken= True
    """
    return IMG,ContoursNowidth,ContoursRegu,REGU.binary

################################################################################
################################################################################
###############################################################################

def ContoursCaracteristics(IMG,AllContours,AllArcs,p):
    """Add geometric properties to every contour, notably with the branch
    widths removed: for each contour computes max inscribed distance and
    polyp surface for (a) the raw polygon, (b) the border-free polygon
    (C.Noborder), and (c) the regulated polygon (C.Regulated), and
    accumulates the hole distance maps into IMG. (Original docstring was
    in French.)"""
    print('### ADDING NEW CONTOURS PROPERTIES ###')
    IMG.enveloppe         = np.copy(IMG.binary).astype(np.int32)
    IMG.Holes             = np.zeros((IMG.X,IMG.Y),dtype=np.int32)
    IMG.HolesDistRegu     = np.zeros((IMG.X,IMG.Y),dtype=np.int32)
    IMG.HolesDistNoborder = np.zeros((IMG.X,IMG.Y),dtype=np.int32)
    IMG.regu              = np.zeros((IMG.X,IMG.Y),dtype=np.int32)
    pourcent=0
    t=0
    print('Calculation off AllContours Properties...')
    for i,C in enumerate(AllContours.list):
        if i != AllContours.labmax:
            # crude textual progress indicator
            if 100*i/len(AllContours.list)>pourcent:
                pourcent+=1
                print(10*'\r',end='')
                print(int(100*i/len(AllContours.list)),'%',end='')
            ### Classical Contours
            xmin=np.amin(C.XY[:,0]);xmax=np.amax(C.XY[:,0])
            ymin=np.amin(C.XY[:,1]);ymax=np.amax(C.XY[:,1])
            XY=np.copy(C.XY)
            XY[:,0]-=xmin
            XY[:,1]-=ymin
            XY=XY.reshape((-1,1,2)).astype(np.int32)
            IMG_POLY=np.zeros((int(xmax)-int(xmin)+2,(int(ymax)-int(ymin)+2)),dtype=np.int32)
            cv2.fillPoly(IMG_POLY,[XY],1)
            IMG_POLY=ndimage.distance_transform_edt(IMG_POLY)
            AllContours.list[i].DistMax = np.amax(IMG_POLY)
            if p.get('Polypsize',False):
                AllContours.list[i].SurfPolyp= sum(1 for e in np.reshape(IMG_POLY,(len(IMG_POLY[:,0])*len(IMG_POLY[0,:]),1)) if e > p['Polypsize'])
            #ContoursNoborder :
            C.Noborder=c.Void()
            CN=C.Noborder
            CN.XY = np.copy(AllArcs.Vertices[C.Points,0:2])
            welldetermined=True
            if len(CN.XY)==0:
                CN.XY=C.XY
                welldetermined=False
            # shoelace perimeter/area of the border-free polygon
            CN.perimeter= np.sum(np.sqrt( ( CN.XY[:,0]-np.roll(CN.XY[:,0],1) )**2 + ( CN.XY[:,1]-np.roll(CN.XY[:,1],1) )**2 ))
            CN.surface  = 0.5*np.abs(np.dot(CN.XY[:,0],np.roll(CN.XY[:,1],1))- np.dot(CN.XY[:,1],np.roll(CN.XY[:,0],1)))
            xmin=np.amin(CN.XY[:,0]);xmax=np.amax(CN.XY[:,0])
            ymin=np.amin(CN.XY[:,1]);ymax=np.amax(CN.XY[:,1])
            XY=copy.copy(CN.XY)
            XY[:,0]-=xmin
            XY[:,1]-=ymin
            XY=XY.reshape((-1,1,2)).astype(np.int32)
            IMG_POLY=np.zeros((int(xmax)-int(xmin)+2,(int(ymax)-int(ymin)+2)),dtype=np.int32)
            cv2.fillPoly(IMG_POLY,[XY],1)
            ### TODO (from original French note): for every arc inside, add the
            # set of points constituting it — set 0 with the home-made line routine
            IMG_POLY=ndimage.distance_transform_edt(IMG_POLY)
            CN.DistMax = np.amax(IMG_POLY)
            if p.get('Polypsize',False):
                CN.SurfPolyp= sum(1 for e in np.reshape(IMG_POLY,(len(IMG_POLY[:,0])*len(IMG_POLY[0,:]),1)) if e > p['Polypsize'])
            #print(np.shape(IMG.HolesDistNoborder[int(xmin):int(xmin)+np.shape(IMG_POLY)[0]) ,int(ymin):int(ymin)+np.shape(IMG_POLY)[0] ]) ,np.shape(IMG_POLY.astype(np.int32) ))
            try:
                IMG.HolesDistNoborder[int(xmin):int(xmin)+np.shape(IMG_POLY)[0] ,int(ymin):int(ymin)+np.shape(IMG_POLY)[1] ]+=IMG_POLY.astype(np.int32)
                IMG.enveloppe        [int(xmin):int(xmin)+np.shape(IMG_POLY)[0] ,int(ymin):int(ymin)+np.shape(IMG_POLY)[1] ]+=IMG_POLY.astype(np.int32)
            except BaseException:
                # NOTE(review): BaseException also swallows KeyboardInterrupt;
                # the intent here appears to be out-of-bounds slices only.
                welldetermined=False
            #ContoursRegu :
            if p.get('branchwidth',False):
                C.Regulated=c.Void()
                CR=C.Regulated
                Regcont=measure.find_contours(IMG_POLY,p['branchwidth']/2)
                if len(Regcont)>1:
                    # several contours at this level: keep the longest one
                    lens=np.zeros(len(Regcont))
                    for z,elems in enumerate(Regcont):
                        lens[z]=len(elems)
                    CR.XY=Regcont[np.argmax(lens)]
                elif len(Regcont)==1:
                    CR.XY=Regcont[0]
                if (len(Regcont)==0 or not welldetermined):
                    CR.XY=[[None],[None]]
                    CR.perimeter= 0
                    CR.surface  = 0
                    CR.DistMax  = 0
                    CR.DistMax  = 0
                else:
                    CR.perimeter= np.sum(np.sqrt( ( CR.XY[:,0]-np.roll(CR.XY[:,0],1) )**2 + ( CR.XY[:,1]-np.roll(CR.XY[:,1],1) )**2 ))
                    CR.surface  = 0.5*np.abs(np.dot(CR.XY[:,0],np.roll(CR.XY[:,1],1))- np.dot(CR.XY[:,1],np.roll(CR.XY[:,0],1)))
                    XY=CR.XY.astype(np.int32)
                    IMG_POLY=np.zeros((int(xmax)-int(xmin)+2,(int(ymax)-int(ymin)+2)),dtype=np.int32)
                    cv2.fillPoly(IMG_POLY,[XY],1)
                    IMG_POLY=ndimage.distance_transform_edt(IMG_POLY)
                    CR.DistMax = np.amax(IMG_POLY)
                    if p.get('Polypsize',False):
                        CR.SurfPolyp= sum(1 for e in np.reshape(IMG_POLY,(len(IMG_POLY[:,0])*len(IMG_POLY[0,:]),1)) if e > p['Polypsize'])
                    IMG.HolesDistRegu[int(xmin):int(xmin)+len(IMG_POLY[:,0]) ,int(ymin):int(ymin)+len(IMG_POLY[0,:]) ]+=IMG_POLY.astype(np.int32)
                    # shift the regulated contour back to absolute coordinates
                    CR.XY[:,0]+=xmin
                    CR.XY[:,1]+=ymin
    print('Done, t=',time.time()-t)
    pourcent=0
    t=time.time()
    return IMG,AllContours


def AllPropertiesCompilation(IMG,AllContours,AllArcs,p):
    """Compile derived images (envelopes, hole/distance maps) and flatten
    per-contour properties into AllContours.Properlists arrays. (Original
    docstring was in French: "we add lots of properties".)"""
    print('\n### PROPERTIES COMPILATION')
    print('Compilation of properties to "Images"...')
    pourcent=0
    t=time.time()
    IMG.enveloppe[IMG.enveloppe>1]=1
    IMG.enveloppe     = ndimage.binary_fill_holes(IMG.enveloppe)
    IMG.distenveloppe = ndimage.distance_transform_edt(IMG.enveloppe)
    IMG.regu[IMG.HolesDistRegu>1]=1
    IMG.regu     = (1-IMG.regu)*IMG.enveloppe
    IMG.distregu = ndimage.distance_transform_edt(IMG.regu)
    IMG.Holes         = (1-IMG.binary)*IMG.enveloppe
    IMG.HolesDist     = ndimage.distance_transform_edt( IMG.Holes )
    IMG.Holesregu     = (1-IMG.regu)*IMG.enveloppe
    IMG.HolesDistRegu = ndimage.distance_transform_edt( IMG.Holesregu )
    print('Done, t=',time.time()-t)
    if not p.get('stayinint32',False):
        # downcast the large image arrays to save memory
        print('Conversion of IMG arry in int16...')
        IMG.enveloppe    =IMG.enveloppe.astype(np.int16)
        IMG.distenveloppe=IMG.distenveloppe.astype(np.int16)
        IMG.Regu         =IMG.regu.astype(np.int16)
        IMG.DistRegu     =IMG.distregu.astype(np.int16)
        IMG.Holes        =IMG.Holes.astype(np.int16)
        IMG.HolesDist    =IMG.HolesDist.astype(np.int16)
        IMG.Holesregu    =IMG.Holesregu.astype(np.int16)
        IMG.HolesDistRegu=IMG.HolesDistRegu.astype(np.int16)
        IMG.HolesDistNoborder = IMG.HolesDistNoborder.astype(np.int16)
        IMG.binary = IMG.binary.astype(np.int16)
        IMG.dist   = IMG.dist.astype(np.int16)
    print('Compilation of properties to "AllContours"...',end='')
    t=time.time()
    # One flat array per property; the exterior contour (labmax) is excluded,
    # hence ncontours-1 entries.
    AllContours.Properlists= {}
    AllContours.Properlists['Perimetres']  = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['Surface']     = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['AspectRatio'] = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['DistMax']     = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['SsupDpolyps'] = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['ratiosurfac'] = np.zeros(AllContours.ncontours-1)
    if p.get('branchwidth',False):
        AllContours.Properlists['RegPerimetres']  = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['RegSurface']     = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['RegAspectRatio'] = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['RegDistMax']     = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['RegSsupDpolyps'] = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['Regratiosurfac'] = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['NoWPerimetres']  = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['NoWSurface']     = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['NoWAspectRatio'] = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['NoWDistMax']     = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['NoWSsupDpolyps'] = np.zeros(AllContours.ncontours-1)
        AllContours.Properlists['NoWratiosurfac'] = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['nbvoisins']  = np.zeros(AllContours.ncontours-1).astype(np.int32)
    AllContours.Properlists['Arcsinside'] = np.zeros(AllContours.ncontours-1).astype(np.int32)
    AllContours.Properlists['Voies']      = np.zeros(AllContours.ncontours-1).astype(np.int32)
    AllContours.Properlists['X']    = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['Y']    = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['Foot'] = np.zeros(AllContours.ncontours-1)
    AllContours.Properlists['Ext']  = np.zeros(AllContours.ncontours-1)
    index=0
    for i,C in enumerate(AllContours.list[:]):
        if i!=AllContours.labmax:
            #print(i)
            if 100*i/len(AllContours.list)>pourcent:
                pourcent+=1
                print(10*'\r',end='')
                print(int(100*i/len(AllContours.list)),'%',end='')
            AllContours.Properlists['Perimetres'] [index]=(C.perimeter )
            AllContours.Properlists['Surface']    [index]=(C.surface )
            AllContours.Properlists['AspectRatio'][index]=(np.sqrt(C.surface)/(C.perimeter*2*np.sqrt(np.pi)) )
            AllContours.Properlists['DistMax']    [index]=(C.DistMax )
            if p.get('Polypsize',False):
                AllContours.Properlists['SsupDpolyps'][index]=(C.SurfPolyp )
                AllContours.Properlists['ratiosurfac'][index]=(C.SurfPolyp/C.surface )
            if p.get('branchwidth',False):
                if C.Regulated.perimeter >0 :
                    AllContours.Properlists['RegPerimetres'] [index]=(C.Regulated.perimeter )
                    AllContours.Properlists['RegSurface']    [index]=(C.Regulated.surface )
                    AllContours.Properlists['RegAspectRatio'][index]=(np.sqrt(C.Regulated.surface)/(C.Regulated.perimeter*2*np.sqrt(np.pi)) )
                    AllContours.Properlists['RegDistMax']    [index]=(C.Regulated.DistMax )
                    if p.get('Polypsize',False):
                        AllContours.Properlists['RegSsupDpolyps'][index]=(C.Regulated.SurfPolyp )
                        AllContours.Properlists['Regratiosurfac'][index]=(C.Regulated.SurfPolyp/C.Regulated.surface )
                else:
                    AllContours.Properlists['RegPerimetres'] [index]=(0)
                    AllContours.Properlists['RegSurface']    [index]=(0)
                    AllContours.Properlists['RegAspectRatio'][index]=(0)
                    # NOTE(review): source chunk is truncated at this point;
                    # the remainder of this function is not visible here.
AllContours.Properlists['RegDistMax'] [index]=(0) if p.get('Polypsize',False): AllContours.Properlists['RegSsupDpolyps'][index]=(0) AllContours.Properlists['Regratiosurfac'][index]=(0) AllContours.Properlists['NoWPerimetres'] [index]=(C.Noborder.perimeter ) AllContours.Properlists['NoWSurface'] [index]=(C.Noborder.surface ) AllContours.Properlists['NoWAspectRatio'][index]=(np.sqrt(C.Noborder.surface)/(C.Noborder.perimeter*2*np.sqrt(np.pi)) ) AllContours.Properlists['NoWDistMax'] [index]=(C.Noborder.DistMax ) if p.get('Polypsize',False): AllContours.Properlists['NoWSsupDpolyps'][index]=(C.Noborder.SurfPolyp ) AllContours.Properlists['NoWratiosurfac'][index]=(C.Noborder.SurfPolyp/C.Noborder.surface ) AllContours.Properlists['nbvoisins'] [index]=(len(C.Arcs) ) AllContours.Properlists['Arcsinside'] [index]=(len(C.ArcsInside) ) AllContours.Properlists['Voies'] [index]=(len(C.Voies)) AllContours.Properlists['X'] [index]=(C.Center[0] ) AllContours.Properlists['Y'] [index]=(C.Center[1] ) AllContours.Properlists['Foot'][index]=(np.sqrt( (C.Center[0] - p['Pied'][0]) ** 2 + (C.Center[1] - p['Pied'][1]) ** 2
<reponame>streamsets/datacollector-tests # Copyright 2021 StreamSets Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import string import pytest from pretenders.common.constants import FOREVER from streamsets.testframework.decorators import stub from streamsets.testframework.markers import http, sdc_min_version from streamsets.testframework.utils import get_random_string @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'BASIC', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'use_oauth_2': True}]) def test_additional_key_value_pairs_in_token_request_body(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'allow_extra_columns': False, 'data_format': 'DELIMITED', 'header_line': 'WITH_HEADER'}, {'allow_extra_columns': True, 'data_format': 'DELIMITED', 'header_line': 'WITH_HEADER'}]) def test_allow_extra_columns(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DATAGRAM', 'datagram_packet_format': 'COLLECTD'}]) def test_auth_file(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'BASIC'}, {'authentication_type': 'DIGEST'}, {'authentication_type': 'NONE'}, {'authentication_type': 'OAUTH'}, {'authentication_type': 'UNIVERSAL'}]) 
def test_authentication_type(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'avro_schema_location': 'INLINE', 'data_format': 'AVRO'}]) def test_avro_schema(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'avro_schema_location': 'INLINE', 'data_format': 'AVRO'}, {'avro_schema_location': 'REGISTRY', 'data_format': 'AVRO'}, {'avro_schema_location': 'SOURCE', 'data_format': 'AVRO'}]) def test_avro_schema_location(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'WHOLE_FILE'}]) def test_buffer_size_in_bytes(sdc_builder, sdc_executor, stage_attributes): pass @stub def test_charset(sdc_builder, sdc_executor): pass @stub @pytest.mark.parametrize('stage_attributes', [{'use_default_cipher_suites': False, 'use_tls': True}]) def test_cipher_suites(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'NONE', 'credentials_grant_type': 'CLIENT_CREDENTIALS', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}]) def test_client_id(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'NONE', 'credentials_grant_type': 'CLIENT_CREDENTIALS', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}, {'authentication_type': 'NONE', 
'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}]) def test_client_secret(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DELIMITED', 'delimiter_format_type': 'CUSTOM', 'enable_comments': True}]) def test_comment_marker(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'compression_format': 'ARCHIVE', 'data_format': 'BINARY'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'BINARY'}, {'compression_format': 'COMPRESSED_FILE', 'data_format': 'BINARY'}, {'compression_format': 'NONE', 'data_format': 'BINARY'}, {'compression_format': 'ARCHIVE', 'data_format': 'DELIMITED'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'DELIMITED'}, {'compression_format': 'COMPRESSED_FILE', 'data_format': 'DELIMITED'}, {'compression_format': 'NONE', 'data_format': 'DELIMITED'}, {'compression_format': 'ARCHIVE', 'data_format': 'JSON'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'JSON'}, {'compression_format': 'COMPRESSED_FILE', 'data_format': 'JSON'}, {'compression_format': 'NONE', 'data_format': 'JSON'}, {'compression_format': 'ARCHIVE', 'data_format': 'LOG'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'LOG'}, {'compression_format': 'COMPRESSED_FILE', 'data_format': 'LOG'}, {'compression_format': 'NONE', 'data_format': 'LOG'}, {'compression_format': 'ARCHIVE', 'data_format': 'PROTOBUF'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'PROTOBUF'}, {'compression_format': 'COMPRESSED_FILE', 'data_format': 'PROTOBUF'}, {'compression_format': 'NONE', 'data_format': 'PROTOBUF'}, {'compression_format': 'ARCHIVE', 'data_format': 'SDC_JSON'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'SDC_JSON'}, {'compression_format': 'COMPRESSED_FILE', 'data_format': 'SDC_JSON'}, 
{'compression_format': 'NONE', 'data_format': 'SDC_JSON'}, {'compression_format': 'ARCHIVE', 'data_format': 'TEXT'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'TEXT'}, {'compression_format': 'COMPRESSED_FILE', 'data_format': 'TEXT'}, {'compression_format': 'NONE', 'data_format': 'TEXT'}, {'compression_format': 'ARCHIVE', 'data_format': 'XML'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'XML'}, {'compression_format': 'COMPRESSED_FILE', 'data_format': 'XML'}, {'compression_format': 'NONE', 'data_format': 'XML'}]) def test_compression_format(sdc_builder, sdc_executor, stage_attributes): pass @stub def test_connect_timeout(sdc_builder, sdc_executor): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'OAUTH'}]) def test_consumer_key(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'OAUTH'}]) def test_consumer_secret(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'convert_hi_res_time_and_interval': False, 'data_format': 'DATAGRAM', 'datagram_packet_format': 'COLLECTD'}, {'convert_hi_res_time_and_interval': True, 'data_format': 'DATAGRAM', 'datagram_packet_format': 'COLLECTD'}]) def test_convert_hi_res_time_and_interval(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'BASIC', 'credentials_grant_type': 'CLIENT_CREDENTIALS', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'CLIENT_CREDENTIALS', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'RESOURCE_OWNER', 
'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'CLIENT_CREDENTIALS', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'CLIENT_CREDENTIALS', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'RESOURCE_OWNER', 'use_oauth_2': True}]) def test_credentials_grant_type(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'TEXT', 'use_custom_delimiter': True}]) def test_custom_delimiter(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'LOG', 'log_format': 'LOG4J', 'use_custom_log_format': True}]) def test_custom_log4j_format(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'LOG', 'log_format': 'APACHE_CUSTOM_LOG_FORMAT'}]) def test_custom_log_format(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'AVRO'}, {'data_format': 'BINARY'}, {'data_format': 'DATAGRAM'}, {'data_format': 'DELIMITED'}, {'data_format': 'JSON'}, {'data_format': 'LOG'}, {'data_format': 'PROTOBUF'}, {'data_format': 'SDC_JSON'}, {'data_format': 'TEXT'}, {'data_format': 'XML'}]) def test_data_format(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DATAGRAM', 'datagram_packet_format': 'COLLECTD'}, {'data_format': 'DATAGRAM', 'datagram_packet_format': 'NETFLOW'}, {'data_format': 'DATAGRAM', 'datagram_packet_format': 'RAW_DATA'}, {'data_format': 'DATAGRAM', 'datagram_packet_format': 'SYSLOG'}]) 
def test_datagram_packet_format(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'http_method': 'DELETE'}, {'http_method': 'EXPRESSION'}, {'http_method': 'PATCH'}, {'http_method': 'POST'}, {'http_method': 'PUT'}]) def test_default_request_content_type(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'PROTOBUF', 'delimited_messages': False}, {'data_format': 'PROTOBUF', 'delimited_messages': True}]) def test_delimited_messages(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DELIMITED', 'delimiter_format_type': 'CUSTOM'}]) def test_delimiter_character(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'XML'}]) def test_delimiter_element(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DELIMITED', 'delimiter_format_type': 'CSV'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'CUSTOM'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'EXCEL'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'MULTI_CHARACTER'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'MYSQL'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'POSTGRES_CSV'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'POSTGRES_TEXT'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'RFC4180'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'TDF'}]) def test_delimiter_format_type(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DELIMITED', 'delimiter_format_type': 'CUSTOM', 'enable_comments': False}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'CUSTOM', 'enable_comments': True}]) def test_enable_comments(sdc_builder, sdc_executor, stage_attributes): 
pass @stub @pytest.mark.parametrize('stage_attributes', [{'enable_request_logging': False}, {'enable_request_logging': True}]) def test_enable_request_logging(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DELIMITED', 'delimiter_format_type': 'CUSTOM'}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'MULTI_CHARACTER'}]) def test_escape_character(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'EXCEL', 'excel_header_option': 'IGNORE_HEADER'}, {'data_format': 'EXCEL', 'excel_header_option': 'NO_HEADER'}, {'data_format': 'EXCEL', 'excel_header_option': 'WITH_HEADER'}]) def test_excel_header_option(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DATAGRAM', 'datagram_packet_format': 'COLLECTD', 'exclude_interval': False}, {'data_format': 'DATAGRAM', 'datagram_packet_format': 'COLLECTD', 'exclude_interval': True}]) def test_exclude_interval(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'allow_extra_columns': True, 'data_format': 'DELIMITED', 'header_line': 'WITH_HEADER'}]) def test_extra_column_prefix(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'LOG', 'log_format': 'REGEX'}]) def test_field_path_to_regex_group_mapping(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'compression_format': 'ARCHIVE', 'data_format': 'BINARY'}, {'compression_format': 'ARCHIVE', 'data_format': 'DELIMITED'}, {'compression_format': 'ARCHIVE', 'data_format': 'JSON'}, {'compression_format': 'ARCHIVE', 'data_format': 'LOG'}, {'compression_format': 'ARCHIVE', 'data_format': 'PROTOBUF'}, {'compression_format': 'ARCHIVE', 'data_format': 'SDC_JSON'}, {'compression_format': 'ARCHIVE', 'data_format': 
'TEXT'}, {'compression_format': 'ARCHIVE', 'data_format': 'XML'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'BINARY'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'DELIMITED'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'JSON'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'LOG'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'PROTOBUF'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'SDC_JSON'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'TEXT'}, {'compression_format': 'COMPRESSED_ARCHIVE', 'data_format': 'XML'}]) def test_file_name_pattern_within_compressed_directory(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'LOG', 'log_format': 'GROK'}]) def test_grok_pattern(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'LOG', 'log_format': 'GROK'}]) def test_grok_pattern_definition(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'header_output_location': 'HEADER'}]) def test_header_attribute_prefix(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DELIMITED', 'header_line': 'IGNORE_HEADER'}, {'data_format': 'DELIMITED', 'header_line': 'NO_HEADER'}, {'data_format': 'DELIMITED', 'header_line': 'WITH_HEADER'}]) def test_header_line(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'header_output_location': 'FIELD'}]) def test_header_output_field(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'header_output_location': 'FIELD'}, {'header_output_location': 'HEADER'}, {'header_output_location': 'NONE'}]) def test_header_output_location(sdc_builder, sdc_executor, stage_attributes): pass @stub def 
test_headers(sdc_builder, sdc_executor): pass @stub @pytest.mark.parametrize('stage_attributes', [{'http_compression': 'GZIP'}, {'http_compression': 'NONE'}, {'http_compression': 'SNAPPY'}]) def test_http_compression(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'http_method': 'DELETE'}, {'http_method': 'EXPRESSION'}, {'http_method': 'GET'}, {'http_method': 'HEAD'}, {'http_method': 'PATCH'}, {'http_method': 'POST'}, {'http_method': 'PUT'}]) def test_http_method(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'http_method': 'EXPRESSION'}]) def test_http_method_expression(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DATAGRAM', 'ignore_control_characters': False}, {'data_format': 'DATAGRAM', 'ignore_control_characters': True}, {'data_format': 'DELIMITED', 'ignore_control_characters': False}, {'data_format': 'DELIMITED', 'ignore_control_characters': True}, {'data_format': 'JSON', 'ignore_control_characters': False}, {'data_format': 'JSON', 'ignore_control_characters': True}, {'data_format': 'LOG', 'ignore_control_characters': False}, {'data_format': 'LOG', 'ignore_control_characters': True}, {'data_format': 'TEXT', 'ignore_control_characters': False}, {'data_format': 'TEXT', 'ignore_control_characters': True}, {'data_format': 'XML', 'ignore_control_characters': False}, {'data_format': 'XML', 'ignore_control_characters': True}]) def test_ignore_control_characters(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'DELIMITED', 'delimiter_format_type': 'CUSTOM', 'ignore_empty_lines': False}, {'data_format': 'DELIMITED', 'delimiter_format_type': 'CUSTOM', 'ignore_empty_lines': True}]) def test_ignore_empty_lines(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', 
[{'data_format': 'EXCEL', 'read_all_sheets': False}]) def test_import_sheets(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'TEXT', 'include_custom_delimiter': False, 'use_custom_delimiter': True}, {'data_format': 'TEXT', 'include_custom_delimiter': True, 'use_custom_delimiter': True}]) def test_include_custom_delimiter(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'data_format': 'XML', 'include_field_xpaths': False}, {'data_format': 'XML', 'include_field_xpaths': True}]) def test_include_field_xpaths(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 'use_oauth_2': True}]) def test_jwt_claims(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS256', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS384', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS512', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'NONE', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS256', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS384', 'use_oauth_2': True}, {'authentication_type': 'BASIC', 
'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS512', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS256', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS384', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS512', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'NONE', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS256', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS384', 'use_oauth_2': True}, {'authentication_type': 'DIGEST', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS512', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS256', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS384', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS512', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'NONE', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS256', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS384', 'use_oauth_2': True}, {'authentication_type': 'NONE', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS512', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS256', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 
'jwt_signing_algorithm': 'HS384', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS512', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'NONE', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS256', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS384', 'use_oauth_2': True}, {'authentication_type': 'UNIVERSAL', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'RS512', 'use_oauth_2': True}]) def test_jwt_signing_algorithm(sdc_builder, sdc_executor, stage_attributes): pass @stub @pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'BASIC', 'credentials_grant_type': 'JWT', 'jwt_signing_algorithm': 'HS256', 'use_oauth_2': True}, {'authentication_type': 'BASIC',
<filename>tests/ampligraph/evaluation/test_protocol.py<gh_stars>0 # Copyright 2019 The AmpliGraph Authors. All Rights Reserved. # # This file is Licensed under the Apache License, Version 2.0. # A copy of the Licence is available in LICENCE, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # import numpy as np import pytest from ampligraph.latent_features import TransE, DistMult, ComplEx from ampligraph.evaluation import evaluate_performance, generate_corruptions_for_eval, \ generate_corruptions_for_fit, to_idx, create_mappings, mrr_score, hits_at_n_score, select_best_model_ranking, \ filter_unseen_entities from ampligraph.datasets import load_wn18, load_fb15k import tensorflow as tf from ampligraph.evaluation import train_test_split_no_unseen @pytest.mark.skip(reason="Speeding up jenkins") def test_select_best_model_ranking(): X = load_wn18() model_class = ComplEx param_grid = in_dict = { "batches_count": [500], "seed": 0, "epochs": [2000], "k": [10, 150], "eta": [10], "loss": ["nll"], "loss_params": { }, "embedding_model_params": { }, "regularizer": [None], "regularizer_params": { }, "optimizer": ["adagrad"], "optimizer_params": { "lr": [0.1, 0.01, 0.001] } } best_model, best_params, best_mrr_train, ranks_test, mrr_test = select_best_model_ranking(model_class, X, param_grid) print(type(best_model).__name__, best_params, best_mrr_train, mrr_test) assert best_params['k'] == 150 def test_select_best_model_ranking_inf_skip(): X = load_wn18() X['test'] = X['test'][::1000] model_class = ComplEx param_grid = in_dict = { "batches_count": [10], "seed": 0, "epochs": [1], "k": [150], "eta": [10], "loss": ["self_adversarial"], "loss_params": { }, "embedding_model_params": { }, "regularizer": [None], "regularizer_params": { }, "optimizer": ["adagrad"], "optimizer_params": { "lr": [1000, 0.1] }, 'verbose':True } best_model, best_params, best_mrr_train, ranks_test, mrr_test = select_best_model_ranking(model_class, X, param_grid) assert(best_params["optimizer_params"]["lr"] 
== 0.1)  # NOTE(review): orphaned tail of a test truncated before this chunk


def test_evaluate_performance_default_protocol_without_filter():
    # Mean rank computed from separate subject- and object-corruption runs
    # must equal the mean rank of one combined 's+o' run under the default
    # protocol (no filter triples supplied).
    wn18 = load_wn18()

    model = TransE(batches_count=10, seed=0, epochs=1, k=50, eta=10,
                   verbose=True,
                   embedding_model_params={'normalize_ent_emb': False, 'norm': 1},
                   loss='self_adversarial',
                   loss_params={'margin': 1, 'alpha': 0.5},
                   optimizer='adam', optimizer_params={'lr': 0.0005})
    model.fit(wn18['train'])

    from ampligraph.evaluation import evaluate_performance
    ranks_sep = []
    from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score
    # Object-side corruptions only.
    ranks = evaluate_performance(wn18['test'][::100], model, verbose=True,
                                 corrupt_side='o', use_default_protocol=False)
    ranks_sep.extend(ranks)

    from ampligraph.evaluation import evaluate_performance
    from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score
    # Subject-side corruptions only.
    ranks = evaluate_performance(wn18['test'][::100], model, verbose=True,
                                 corrupt_side='s', use_default_protocol=False)
    ranks_sep.extend(ranks)

    print('----------EVAL WITHOUT FILTER-----------------')
    print('----------Subj and obj corrupted separately-----------------')
    mr_sep = mr_score(ranks_sep)
    print('MAR:', mr_sep)
    print('Mrr:', mrr_score(ranks_sep))
    print('hits10:', hits_at_n_score(ranks_sep, 10))
    print('hits3:', hits_at_n_score(ranks_sep, 3))
    print('hits1:', hits_at_n_score(ranks_sep, 1))

    from ampligraph.evaluation import evaluate_performance
    from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score
    # Both sides corrupted in a single call (default protocol).
    ranks = evaluate_performance(wn18['test'][::100], model, verbose=True,
                                 corrupt_side='s+o', use_default_protocol=True)
    print('----------corrupted with default protocol-----------------')
    mr_joint = mr_score(ranks)
    mrr_joint = mrr_score(ranks)
    print('MAR:', mr_joint)
    print('Mrr:', mrr_score(ranks))
    print('hits10:', hits_at_n_score(ranks, 10))
    print('hits3:', hits_at_n_score(ranks, 3))
    print('hits1:', hits_at_n_score(ranks, 1))

    np.testing.assert_equal(mr_sep, mr_joint)
    # NOTE(review): `is not np.Inf` is an identity test on a float and is
    # always True for computed values; `mrr_joint != np.inf` is likely meant.
    assert(mrr_joint is not np.Inf)


def test_evaluate_performance_default_protocol_with_filter():
    # Same MR-equivalence check as the test above, but every evaluation call
    # filters known-true triples (train + valid + test) out of the corruptions.
    wn18 = load_wn18()

    X_filter = np.concatenate((wn18['train'], wn18['valid'], wn18['test']))

    model = TransE(batches_count=10, seed=0, epochs=1, k=50, eta=10,
                   verbose=True,
                   embedding_model_params={'normalize_ent_emb': False, 'norm': 1},
                   loss='self_adversarial',
                   loss_params={'margin': 1, 'alpha': 0.5},
                   optimizer='adam', optimizer_params={'lr': 0.0005})
    model.fit(wn18['train'])

    from ampligraph.evaluation import evaluate_performance
    ranks_sep = []
    from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score
    ranks = evaluate_performance(wn18['test'][::100], model, X_filter, verbose=True,
                                 corrupt_side='o', use_default_protocol=False)
    ranks_sep.extend(ranks)

    from ampligraph.evaluation import evaluate_performance
    from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score
    ranks = evaluate_performance(wn18['test'][::100], model, X_filter, verbose=True,
                                 corrupt_side='s', use_default_protocol=False)
    ranks_sep.extend(ranks)

    print('----------EVAL WITH FILTER-----------------')
    print('----------Subj and obj corrupted separately-----------------')
    mr_sep = mr_score(ranks_sep)
    print('MAR:', mr_sep)
    print('Mrr:', mrr_score(ranks_sep))
    print('hits10:', hits_at_n_score(ranks_sep, 10))
    print('hits3:', hits_at_n_score(ranks_sep, 3))
    print('hits1:', hits_at_n_score(ranks_sep, 1))

    from ampligraph.evaluation import evaluate_performance
    from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score
    ranks = evaluate_performance(wn18['test'][::100], model, X_filter, verbose=True,
                                 corrupt_side='s+o', use_default_protocol=True)
    print('----------corrupted with default protocol-----------------')
    mr_joint = mr_score(ranks)
    mrr_joint = mrr_score(ranks)
    print('MAR:', mr_joint)
    print('Mrr:', mrr_joint)
    print('hits10:', hits_at_n_score(ranks, 10))
    print('hits3:', hits_at_n_score(ranks, 3))
    print('hits1:', hits_at_n_score(ranks, 1))

    np.testing.assert_equal(mr_sep, mr_joint)
    assert(mrr_joint is not np.Inf)


def test_evaluate_performance_so_side_corruptions_with_filter():
    # NOTE(review): despite the name, this variant passes NO filter triples;
    # the "without_filter" test below passes X_filter. The two names look
    # swapped — confirm against upstream before renaming.
    X = load_wn18()
    model = ComplEx(batches_count=10, seed=0, epochs=5, k=200, eta=10,
                    loss='nll', regularizer=None, optimizer='adam',
                    optimizer_params={'lr': 0.01}, verbose=True)
    model.fit(X['train'])

    ranks = evaluate_performance(X['test'][::20], model=model, verbose=True,
                                 use_default_protocol=False, corrupt_side='s+o')
    mrr = mrr_score(ranks)
    hits_10 = hits_at_n_score(ranks, n=10)
    print("ranks: %s" % ranks)
    print("MRR: %f" % mrr)
    print("Hits@10: %f" % hits_10)
    assert(mrr is not np.Inf)


def test_evaluate_performance_so_side_corruptions_without_filter():
    # NOTE(review): see the naming caveat on the test above — this one DOES
    # filter corruptions against train+valid+test.
    X = load_wn18()
    model = ComplEx(batches_count=10, seed=0, epochs=5, k=200, eta=10,
                    loss='nll', regularizer=None, optimizer='adam',
                    optimizer_params={'lr': 0.01}, verbose=True)
    model.fit(X['train'])

    X_filter = np.concatenate((X['train'], X['valid'], X['test']))
    ranks = evaluate_performance(X['test'][::20], model, X_filter, verbose=True,
                                 use_default_protocol=False, corrupt_side='s+o')
    mrr = mrr_score(ranks)
    hits_10 = hits_at_n_score(ranks, n=10)
    print("ranks: %s" % ranks)
    print("MRR: %f" % mrr)
    print("Hits@10: %f" % hits_10)
    assert(mrr is not np.Inf)


@pytest.mark.skip(reason="Speeding up jenkins")
def test_evaluate_performance_nll_complex():
    # End-to-end smoke test: train ComplEx with NLL loss, evaluate with filter.
    X = load_wn18()
    model = ComplEx(batches_count=10, seed=0, epochs=10, k=150,
                    optimizer_params={'lr': 0.1}, eta=10, loss='nll',
                    optimizer='adagrad', verbose=True)
    model.fit(np.concatenate((X['train'], X['valid'])))

    # NOTE(review): `filter` shadows the builtin of the same name.
    filter = np.concatenate((X['train'], X['valid'], X['test']))
    ranks = evaluate_performance(X['test'][:200], model=model,
                                 filter_triples=filter, verbose=True)
    mrr = mrr_score(ranks)
    hits_10 = hits_at_n_score(ranks, n=10)
    print("ranks: %s" % ranks)
    print("MRR: %f" % mrr)
    print("Hits@10: %f" % hits_10)


@pytest.mark.skip(reason="Speeding up jenkins")
def test_evaluate_performance_TransE():
    # End-to-end smoke test: train TransE with pairwise loss, evaluate with filter.
    X = load_wn18()
    model = TransE(batches_count=10, seed=0, epochs=100, k=100, eta=5,
                   optimizer_params={'lr': 0.1}, loss='pairwise',
                   loss_params={'margin': 5}, optimizer='adagrad')
    model.fit(np.concatenate((X['train'], X['valid'])))

    filter = np.concatenate((X['train'], X['valid'], X['test']))
    ranks = evaluate_performance(X['test'][:200], model=model,
                                 filter_triples=filter, verbose=True)
    # ranks = evaluate_performance(X['test'][:200], model=model)

    mrr = mrr_score(ranks)
    hits_10 = hits_at_n_score(ranks, n=10)
    print("ranks: %s" % ranks)
    print("MRR: %f" % mrr)
    print("Hits@10: %f" % hits_10)

    # TODO: add test condition (MRR raw for WN18 and TransE should be ~ 0.335 - check papers)


def test_generate_corruptions_for_eval():
    # Corrupting triple X[0] against all 8 entities must yield 8 object-side
    # followed by 8 subject-side corruptions, in entity-index order.
    X = np.array([['a', 'x', 'b'],
                  ['c', 'x', 'd'],
                  ['e', 'x', 'f'],
                  ['b', 'y', 'h'],
                  ['a', 'y', 'l']])
    rel_to_idx, ent_to_idx = create_mappings(X)
    X = to_idx(X, ent_to_idx=ent_to_idx, rel_to_idx=rel_to_idx)

    with tf.Session() as sess:
        all_ent = tf.constant(list(ent_to_idx.values()), dtype=tf.int64)
        x = tf.constant(np.array([X[0]]), dtype=tf.int64)
        x_n_actual, _ = sess.run(generate_corruptions_for_eval(x, all_ent))

    x_n_expected = np.array([[0, 0, 0],
                             [0, 0, 1],
                             [0, 0, 2],
                             [0, 0, 3],
                             [0, 0, 4],
                             [0, 0, 5],
                             [0, 0, 6],
                             [0, 0, 7],
                             [0, 0, 1],
                             [1, 0, 1],
                             [2, 0, 1],
                             [3, 0, 1],
                             [4, 0, 1],
                             [5, 0, 1],
                             [6, 0, 1],
                             [7, 0, 1]])
    np.testing.assert_array_equal(x_n_actual, x_n_expected)


@pytest.mark.skip(reason="Needs to change to account for prime-product evaluation strategy")
def test_generate_corruptions_for_eval_filtered():
    # Filtered corruption generation: known-true triples must be excluded.
    x = np.array([0, 0, 1])
    idx_entities = np.array([0, 1, 2, 3])
    filter_triples = np.array(([1, 0, 1], [2, 0, 1]))
    x_n_actual = generate_corruptions_for_eval(x, idx_entities=idx_entities,
                                               filter=filter_triples)
    x_n_expected = np.array([[3, 0, 1],
                             [0, 0, 0],
                             [0, 0, 2],
                             [0, 0, 3]])
    np.testing.assert_array_equal(np.sort(x_n_actual, axis=0),
                                  np.sort(x_n_expected, axis=0))


@pytest.mark.skip(reason="Needs to change to account for prime-product evaluation strategy")
def test_generate_corruptions_for_eval_filtered_object():
    # As above, but restricted to object-side ('o') corruptions.
    x = np.array([0, 0, 1])
    idx_entities = np.array([0, 1, 2, 3])
    filter_triples = np.array(([1, 0, 1], [2, 0, 1]))
    x_n_actual = generate_corruptions_for_eval(x, idx_entities=idx_entities,
                                               filter=filter_triples, side='o')
    x_n_expected = np.array([[0, 0, 0],
                             [0, 0, 2],
                             [0, 0, 3]])
    np.testing.assert_array_equal(np.sort(x_n_actual, axis=0),
                                  np.sort(x_n_expected, axis=0))


def test_to_idx():
    # String triples map to integer ids per the mappings from create_mappings.
    X = np.array([['a', 'x', 'b'], ['c', 'y', 'd']])
    X_idx_expected = [[0, 0, 1], [2, 1, 3]]
    rel_to_idx, ent_to_idx = create_mappings(X)
    X_idx = to_idx(X, ent_to_idx=ent_to_idx, rel_to_idx=rel_to_idx)
    np.testing.assert_array_equal(X_idx, X_idx_expected)


def test_filter_unseen_entities_with_strict_mode():
    # Entity 'e' is missing from the model vocabulary: strict mode must raise.
    from collections import namedtuple
    base_model = namedtuple('test_model', 'ent_to_idx')

    X = np.array([['a', 'x', 'b'],
                  ['c', 'y', 'd'],
                  ['e', 'y', 'd']])
    model = base_model({'a': 1, 'b': 2, 'c': 3, 'd': 4})
    with pytest.raises(RuntimeError):
        _ = filter_unseen_entities(X, model, strict=True)


def test_filter_unseen_entities_without_strict_mode():
    # Non-strict mode silently drops triples containing unseen entities.
    from collections import namedtuple
    base_model = namedtuple('test_model', 'ent_to_idx')

    X = np.array([['a', 'x', 'b'],
                  ['c', 'y', 'd'],
                  ['e', 'y', 'd']])
    model = base_model({'a': 1, 'b': 2, 'c': 3, 'd': 4})
    X_filtered = filter_unseen_entities(X, model, strict=False)
    X_expected = np.array([['a', 'x', 'b'], ['c', 'y', 'd']])
    np.testing.assert_array_equal(X_filtered, X_expected)


# @pytest.mark.skip(reason="excluded to try out jenkins.")  # TODO: re-enable this
def test_generate_corruptions_for_fit_corrupt_side_so():
    # Deterministic (rnd=0) corruption of both sides during training.
    X = np.array([['a', 'x', 'b'],
                  ['c', 'x', 'd'],
                  ['e', 'x', 'f'],
                  ['b', 'y', 'h'],
                  ['a', 'y', 'l']])
    rel_to_idx, ent_to_idx = create_mappings(X)
    X = to_idx(X, ent_to_idx=ent_to_idx, rel_to_idx=rel_to_idx)
    eta = 1
    with tf.Session() as sess:
        all_ent = tf.squeeze(tf.constant(list(ent_to_idx.values()), dtype=tf.int32))
        dataset = tf.constant(X, dtype=tf.int32)
        X_corr = sess.run(generate_corruptions_for_fit(dataset, eta=eta,
                                                       corrupt_side='s+o',
                                                       entities_size=len(X), rnd=0))
    print(X_corr)
    # these values occur when seed=0
    X_corr_exp = [[0, 0, 1], [2, 0, 3], [3, 0, 5], [1, 1, 0], [0, 1, 3]]
    np.testing.assert_array_equal(X_corr, X_corr_exp)


def test_generate_corruptions_for_fit_curropt_side_s():
    # NOTE(review): "curropt" typo in the name is preserved (renaming would
    # change the test id). Subject-side-only corruption, deterministic rnd=0.
    X = np.array([['a', 'x', 'b'],
                  ['c', 'x', 'd'],
                  ['e', 'x', 'f'],
                  ['b', 'y', 'h'],
                  ['a', 'y', 'l']])
    rel_to_idx, ent_to_idx = create_mappings(X)
    X = to_idx(X, ent_to_idx=ent_to_idx, rel_to_idx=rel_to_idx)
    eta = 1
    with tf.Session() as sess:
        all_ent = tf.squeeze(tf.constant(list(ent_to_idx.values()), dtype=tf.int32))
        dataset = tf.constant(X, dtype=tf.int32)
        X_corr = sess.run(generate_corruptions_for_fit(dataset, eta=eta,
                                                       corrupt_side='s',
                                                       entities_size=len(X), rnd=0))
    print(X_corr)
    # these values occur when seed=0
    X_corr_exp = [[1, 0, 1], [3, 0, 3], [3, 0, 5], [0, 1, 6], [3, 1, 7]]
    np.testing.assert_array_equal(X_corr, X_corr_exp)


def test_generate_corruptions_for_fit_curropt_side_o():
    # Object-side-only corruption, deterministic rnd=0.
    X = np.array([['a', 'x', 'b'],
                  ['c', 'x', 'd'],
                  ['e', 'x', 'f'],
                  ['b', 'y', 'h'],
                  ['a', 'y', 'l']])
    rel_to_idx, ent_to_idx = create_mappings(X)
    X = to_idx(X, ent_to_idx=ent_to_idx, rel_to_idx=rel_to_idx)
    eta = 1
    with tf.Session() as sess:
        all_ent = tf.squeeze(tf.constant(list(ent_to_idx.values()), dtype=tf.int32))
        dataset = tf.constant(X, dtype=tf.int32)
        X_corr = sess.run(generate_corruptions_for_fit(dataset, eta=eta,
                                                       corrupt_side='o',
                                                       entities_size=len(X), rnd=0))
    print(X_corr)
    # these values occur when seed=0
    X_corr_exp = [[0, 0, 1], [2, 0, 3], [4, 0, 3], [1, 1, 0], [0, 1, 3]]
    np.testing.assert_array_equal(X_corr, X_corr_exp)


def test_train_test_split():
    # Graph
    X = np.array([['a', 'y', 'b'],
                  ['a', 'y', 'c'],
                  ['c', 'y', 'a'],
                  ['d', 'y', 'e'],
                  ['e', 'y', 'f'],
                  ['f', 'y', 'c'],
                  ['f', 'y', 'c']])
    # NOTE(review): truncated here by the chunk boundary.
    expected_X_train = np.array([['a', 'y', 'b'], ['c', 'y', 'a'],
<reponame>lresende/text-extensions-for-pandas
#
#  Copyright (c) 2020 IBM Corp.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

################################################################################
# conll.py
#
# I/O functions related to CONLL entity format and its many derivatives.

from typing import *

import numpy as np
import pandas as pd
import regex

from text_extensions_for_pandas.array import (
    TokenSpan,
    CharSpanArray,
    TokenSpanArray,
)

# Special token that CoNLL-2003 format uses to delineate the documents in
# the collection.
_CONLL_DOC_SEPARATOR = "-DOCSTART-"

# _PUNCT_REGEX = regex.compile(f"[{string.punctuation}]+")
_PUNCT_OR_RIGHT_PAREN_REGEX = regex.compile(
    # Punctuation, right paren, or apostrophe followed by 1-2 lowercase letters
    # But not single or double quote, which could either begin or end a quotation
    '[!#%)*+,-./:;=>?@\\]^_`|}~]|\'[a-zA-Z]{1,2}')
# Tokens that behave like left parentheses for whitespace purposes,
# including dollar signs ("$100", not "$ 100")
_LEFT_PAREN_REGEX = regex.compile(r"[(<\[{$]+")

# _PUNCT_MATCH_FN = np.vectorize(lambda s: _PUNCT_REGEX.fullmatch(s) is not None)
# Vectorized predicates used when reconstructing whitespace around tokens.
_SPACE_BEFORE_MATCH_FN = np.vectorize(lambda s: _PUNCT_OR_RIGHT_PAREN_REGEX.fullmatch(s)
                                      is not None)
_SPACE_AFTER_MATCH_FN = np.vectorize(lambda s: _LEFT_PAREN_REGEX.fullmatch(s)
                                     is not None)


def _make_empty_meta_values(column_names: List[str], iob_columns: List[bool]) \
        -> Dict[str, List[str]]:
    # Build the empty per-token metadata dict: one list per plain column, and
    # a "<name>_iob" + "<name>_type" pair of lists per IOB-format column.
    ret = {}
    for i in range(len(column_names)):
        name = column_names[i]
        if not iob_columns[i]:
            ret[name] = []
        else:
            ret[f"{name}_iob"] = []
            ret[f"{name}_type"] = []
    return ret


class _SentenceData:
    """
    Data structure that encapsulates one sentence's worth of data
    from a parsed CoNLL-2003 file.

    Not intended for use outside this file.
    """

    def __init__(self, column_names: List[str], iob_columns: List[bool]):
        self._column_names = column_names
        self._iob_columns = iob_columns

        # Surface form of token
        self._tokens = []  # Type: List[str]

        # Metadata columns by name
        self._token_metadata = _make_empty_meta_values(column_names, iob_columns)

    @property
    def num_tokens(self) -> int:
        return len(self._tokens)

    @property
    def tokens(self) -> List[str]:
        return self._tokens

    @property
    def token_metadata(self) -> Dict[str, List[str]]:
        return self._token_metadata

    def add_line(self, line_num: int, line_elems: List[str]):
        """
        Append one token line's worth of data to this sentence.

        :param line_num: Location in file, for error reporting
        :param line_elems: Fields of a line, pre-split
        """
        if len(line_elems) != 1 + len(self._column_names):
            raise ValueError(f"Unexpected number of elements {len(line_elems)} "
                             f"at line {line_num}; expected "
                             f"{1 + len(self._column_names)} elements.")
        token = line_elems[0]
        raw_tags = line_elems[1:]
        self._tokens.append(token)
        for i in range(len(raw_tags)):
            raw_tag = raw_tags[i]
            name = self._column_names[i]
            if not self._iob_columns[i]:
                # non-IOB data
                self._token_metadata[name].append(raw_tag)
            else:
                # IOB-format data; split into two values
                if raw_tag.startswith("I-") or raw_tag.startswith("B-"):
                    # Tokens that are entities are tagged with tags like
                    # "I-PER" or "B-MISC".
                    tag, entity = raw_tag.split("-")
                elif raw_tag == "O":
                    tag = raw_tag
                    entity = None
                elif raw_tag == "-X-":
                    # Special metadata value for -DOCSTART- tags in the CoNLL corpus.
                    tag = "O"
                    entity = None
                else:
                    raise ValueError(f"Tag '{raw_tag}' of IOB-format field {i} at line "
                                     f"{line_num} does not start with 'I-', 'O', "
                                     f"or 'B-'.\n"
                                     f"Fields of line are: {line_elems}")
                self._token_metadata[f"{name}_iob"].append(tag)
                self._token_metadata[f"{name}_type"].append(entity)


def _parse_conll_file(input_file: str, column_names: List[str],
                      iob_columns: List[bool]) \
        -> List[List[_SentenceData]]:
    """
    Parse the CoNLL-2003 file format for training/test data to Python
    objects. The format is especially tricky, so everything here is straight
    non-vectorized Python code. If you want performance, write the contents of
    your CoNLL files back out into a file format that supports performance.

    :param input_file: Location of the file to read
    :param column_names: Names for the metadata columns that come after the
     token text. These names will be used to generate the names of the dataframe
     that this function returns.
    :param iob_columns: Mask indicating which of the metadata columns after the
     token text should be treated as being in IOB format. If a column is in IOB format,
     the returned data structure will contain *two* columns, holding IOB tags and
     entity type tags, respectively. For example, an input column "ent" will turn into
     output columns "ent_iob" and "ent_type".

    :returns: A list of lists of _SentenceData objects. The top list has one entry per
     document. The next level lists have one entry per sentence.
    """
    with open(input_file, "r") as f:
        lines = f.readlines()

    # Build up a list of document metadata as Python objects
    docs = []  # Type: List[List[Dict[str, List[str]]]]

    current_sentence = _SentenceData(column_names, iob_columns)

    # Information about the current document
    sentences = []  # Type: SentenceData

    for i in range(len(lines)):
        line = lines[i].strip()
        if 0 == len(line):
            # Blank line is the sentence separator
            if current_sentence.num_tokens > 0:
                sentences.append(current_sentence)
                current_sentence = _SentenceData(column_names, iob_columns)
        else:
            # Not at the end of a sentence
            line_elems = line.split(" ")
            current_sentence.add_line(i, line_elems)

            if line_elems[0] == _CONLL_DOC_SEPARATOR and i > 0:
                # End of document.  Wrap up this document and start a new one.
                #
                # Note that the special "start of document" token is considered part
                # of the document. If you do not follow this convention, the
                # result sets from CoNLL 2003 won't line up.
                # Note also that `current_sentence` is not in `sentences` and will be
                # added to the next document.
                docs.append(sentences)
                sentences = []

    # Close out the last sentence and document, if needed
    if current_sentence.num_tokens > 0:
        sentences.append(current_sentence)
    if len(sentences) > 0:
        docs.append(sentences)
    return docs


def _parse_conll_output_file(doc_dfs: List[pd.DataFrame],
                             input_file: str
                             ) -> List[Dict[str, List[str]]]:
    """
    Parse the CoNLL-2003 file format for output data to Python
    objects. This format is similar to the format that `_parse_conll_file`
    produces, but without the token and document boundary information.

    :param doc_dfs: List of `pd.DataFrame`s of token information from the
     corresponding training data file, one `DataFrame` per document.
     Used for determining document boundaries, which are not encoded in
     CoNLL-2003 output file format.
    :param input_file: Location of the file to read

    :returns: A list of dicts. The top list has one entry per
     document. The next level contains lists under the following keys:
     * `iob`: List of IOB2 tags as strings. This function does **NOT**
       correct for the silly way that CoNLL-format uses "B" tags. See
       `_fix_iob_tags()` for that correction.
     * `entity`: List of entity tags where `iob` contains I's or B's.
       `None` everywhere else.
    """
    with open(input_file, "r") as f:
        lines = f.readlines()

    # Build up a list of document metadata as Python objects
    docs = []  # Type: List[Dict[str, List[str]]]

    # Position in the corpus
    doc_num = 0
    num_tokens_in_doc = len(doc_dfs[doc_num].index)
    token_num = 0

    # Information about the current document's tokens
    iobs = []  # Type: List[str]
    entities = []  # Type: List[str]

    for i in range(len(lines)):
        line = lines[i].strip()
        if 0 == len(line):
            # Blank line is the sentence separator.
            continue
        if " " in line:
            raise ValueError(f"Line {i} contains unexpected space character.\n"
                             f"Line was: '{line}'")
        raw_tag = line
        if raw_tag.startswith("I") or raw_tag.startswith("B"):
            # Tokens that are entities are tagged with tags like
            # "I-PER" or "B-MISC".
            tag, entity = raw_tag.split("-")
        elif raw_tag == "O":
            tag = raw_tag
            entity = None
        else:
            raise ValueError(f"Unexpected tag {raw_tag} at line {i}.\n"
                             f"Line was: '{line}'")
        iobs.append(tag)
        entities.append(entity)

        token_num += 1
        if token_num == num_tokens_in_doc:
            # End of current document, advance to next
            docs.append({
                "iob": iobs,
                "entity": entities
            })
            iobs = []
            entities = []
            doc_num += 1
            token_num = 0
            if doc_num < len(doc_dfs):
                num_tokens_in_doc = len(doc_dfs[doc_num].index)

    if doc_num < len(doc_dfs):
        # Best-effort: warn (instead of raising) when the output file covers
        # fewer documents than the corpus.
        print(f"WARNING: Corpus has {len(doc_dfs)} documents, but "
              f"only found outputs for {doc_num} of them.")
        # raise ValueError(f"Corpus has {len(doc_dfs)} documents, but "
        #                  f"only found outputs for {doc_num} of them.")

    return docs


def _iob_to_iob2(df: pd.DataFrame, column_names: List[str],
                 iob_columns: List[bool]) -> pd.DataFrame:
    """
    In CoNLL-2003 format, entities are stored in IOB format, where the first
    token of an entity is only tagged "B" when there are two entities of the
    same type back-to-back. This format makes downstream processing difficult.
    If a given position has an `I` tag, that position may or may not be the
    first token of an entity. Code will need to inspect both the I/O/B tags
    *and* the entity type of multiple other tokens *and* the boundaries between
    sentences to disambiguate between those two
The parameters used at the time that the task ran. - **Status** *(string) --* The task status for an invocation. - **StatusDetails** *(string) --* The details explaining the status. Details are only available for certain status values. - **StartTime** *(datetime) --* The time that the task started running on the target. - **EndTime** *(datetime) --* The time that the task finished running on the target. - **OwnerInformation** *(string) --* User-provided value to be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window. - **WindowTargetId** *(string) --* The Maintenance Window target ID. :type WindowExecutionId: string :param WindowExecutionId: **[REQUIRED]** The ID of the Maintenance Window execution for which the task is a part. :type TaskId: string :param TaskId: **[REQUIRED]** The ID of the specific task in the Maintenance Window task that should be retrieved. :type InvocationId: string :param InvocationId: **[REQUIRED]** The invocation ID to retrieve. :rtype: dict :returns: """ pass def get_maintenance_window_task(self, WindowId: str, WindowTaskId: str) -> Dict: """ Lists the tasks in a Maintenance Window. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowTask>`_ **Request Syntax** :: response = client.get_maintenance_window_task( WindowId='string', WindowTaskId='string' ) **Response Syntax** :: { 'WindowId': 'string', 'WindowTaskId': 'string', 'Targets': [ { 'Key': 'string', 'Values': [ 'string', ] }, ], 'TaskArn': 'string', 'ServiceRoleArn': 'string', 'TaskType': 'RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA', 'TaskParameters': { 'string': { 'Values': [ 'string', ] } }, 'TaskInvocationParameters': { 'RunCommand': { 'Comment': 'string', 'DocumentHash': 'string', 'DocumentHashType': 'Sha256'|'Sha1', 'NotificationConfig': { 'NotificationArn': 'string', 'NotificationEvents': [ 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed', ], 'NotificationType': 'Command'|'Invocation' }, 'OutputS3BucketName': 'string', 'OutputS3KeyPrefix': 'string', 'Parameters': { 'string': [ 'string', ] }, 'ServiceRoleArn': 'string', 'TimeoutSeconds': 123 }, 'Automation': { 'DocumentVersion': 'string', 'Parameters': { 'string': [ 'string', ] } }, 'StepFunctions': { 'Input': 'string', 'Name': 'string' }, 'Lambda': { 'ClientContext': 'string', 'Qualifier': 'string', 'Payload': b'bytes' } }, 'Priority': 123, 'MaxConcurrency': 'string', 'MaxErrors': 'string', 'LoggingInfo': { 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'S3Region': 'string' }, 'Name': 'string', 'Description': 'string' } **Response Structure** - *(dict) --* - **WindowId** *(string) --* The retrieved Maintenance Window ID. - **WindowTaskId** *(string) --* The retrieved Maintenance Window task ID. - **Targets** *(list) --* The targets where the task should run. - *(dict) --* An array of search criteria that targets instances using a Key,Value combination that you specify. ``Targets`` is required if you don't provide one or more instance IDs in the call. 
- **Key** *(string) --* User-defined criteria for sending commands that target instances that meet the criteria. ``Key`` can be ``tag:<Amazon EC2 tag>`` or ``InstanceIds`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting>`__ in the *AWS Systems Manager User Guide* . - **Values** *(list) --* User-defined criteria that maps to ``Key`` . For example, if you specified ``tag:ServerRole`` , you could specify ``value:WebServer`` to run a command on instances that include Amazon EC2 tags of ``ServerRole,WebServer`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html>`__ in the *AWS Systems Manager User Guide* . - *(string) --* - **TaskArn** *(string) --* The resource that the task used during execution. For RUN_COMMAND and AUTOMATION task types, the TaskArn is the Systems Manager Document name/ARN. For LAMBDA tasks, the value is the function name/ARN. For STEP_FUNCTION tasks, the value is the state machine ARN. - **ServiceRoleArn** *(string) --* The IAM service role to assume during task execution. - **TaskType** *(string) --* The type of task to run. - **TaskParameters** *(dict) --* The parameters to pass to the task when it runs. .. note:: ``TaskParameters`` has been deprecated. To specify parameters to pass to a task when it runs, instead use the ``Parameters`` option in the ``TaskInvocationParameters`` structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters . 
- *(string) --* - *(dict) --* Defines the values for a task parameter. - **Values** *(list) --* This field contains an array of 0 or more strings, each 1 to 255 characters in length. - *(string) --* - **TaskInvocationParameters** *(dict) --* The parameters to pass to the task when it runs. - **RunCommand** *(dict) --* The parameters for a RUN_COMMAND task type. - **Comment** *(string) --* Information about the command(s) to run. - **DocumentHash** *(string) --* The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated. - **DocumentHashType** *(string) --* SHA-256 or SHA-1. SHA-1 hashes have been deprecated. - **NotificationConfig** *(dict) --* Configurations for sending notifications about command status changes on a per-instance basis. - **NotificationArn** *(string) --* An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic. - **NotificationEvents** *(list) --* The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see `Configuring Amazon SNS Notifications for Run Command <http://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html>`__ in the *AWS Systems Manager User Guide* . - *(string) --* - **NotificationType** *(string) --* Command: Receive notification when the status of a command changes. Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes. - **OutputS3BucketName** *(string) --* The name of the Amazon S3 bucket. - **OutputS3KeyPrefix** *(string) --* The Amazon S3 bucket subfolder. - **Parameters** *(dict) --* The parameters for the RUN_COMMAND task execution. 
- *(string) --* - *(list) --* - *(string) --* - **ServiceRoleArn** *(string) --* The IAM service role to assume during task execution. - **TimeoutSeconds** *(integer) --* If this time is reached and the command has not already started running, it doesn't run. - **Automation** *(dict) --* The parameters for an AUTOMATION task type. - **DocumentVersion** *(string) --* The version of an Automation document to use during task execution. - **Parameters** *(dict) --* The parameters for the AUTOMATION task. For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask . .. note:: ``LoggingInfo`` has been deprecated. To specify an S3 bucket to contain logs, instead use the ``OutputS3BucketName`` and ``OutputS3KeyPrefix`` options in the ``TaskInvocationParameters`` structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters . ``TaskParameters`` has been deprecated. To specify parameters to pass to a task when it runs, instead use the ``Parameters`` option in the ``TaskInvocationParameters`` structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters . For AUTOMATION task types, Systems Manager ignores any values specified for these parameters. - *(string) --* - *(list) --* - *(string) --* - **StepFunctions** *(dict) --* The parameters for a STEP_FUNCTION task type. - **Input** *(string) --* The inputs for the STEP_FUNCTION task. - **Name** *(string) --* The name of the STEP_FUNCTION task. - **Lambda** *(dict) --* The parameters for a LAMBDA task type. - **ClientContext** *(string) --* Pass client-specific information to the Lambda function that you are invoking. You can then process the client information in your Lambda function as you choose through the context variable. 
- **Qualifier** *(string) --* (Optional) Specify a Lambda function version or alias name. If you specify a function version, the action uses the qualified function ARN to invoke a specific Lambda function. If you specify an alias name, the action uses the alias ARN to
<filename>remoteobjects/http.py # Copyright (c) 2009 Six Apart Ltd. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of Six Apart Ltd. nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# NOTE(review): this is a Python 2 module (it imports `httplib`, the
# pre-Python-3 name of `http.client`). The chunk is truncated inside
# `HttpObject.post()`; comments below document only the visible code.

import simplejson as json

from remoteobjects.json import ForgivingDecoder
import httplib2
import httplib
import logging

from remoteobjects.dataobject import DataObject, DataObjectMetaclass
from remoteobjects import fields

# Module-level user agent, used as the default `http` argument by the
# request-making methods below.
userAgent = httplib2.Http()

log = logging.getLogger('remoteobjects.http')


def omit_nulls(data):
    """Strips `None` values from a dictionary or `RemoteObject` instance.

    Used as the `default=` serializer hook for `json.dumps()` (see
    `HttpObject.post()`): non-dict objects are reduced to their attribute
    dictionary (or string form if they have none), and `None`-valued keys
    are removed.
    """
    if not isinstance(data, dict):
        # Not a mapping: serialize the object's attribute dict if it has
        # one; otherwise fall back to its string representation.
        if not hasattr(data, '__dict__'):
            return str(data)
        data = dict(data.__dict__)
    # Python 2: `keys()` returns a list, so deleting during this loop is safe.
    for key in data.keys():
        if data[key] is None:
            del data[key]
    return data


class HttpObject(DataObject):

    """A `DataObject` that can be fetched and put over HTTP through a
    RESTful JSON API."""

    # Maps each HTTP status this class handles to whether a response with
    # that status is expected to carry a message body. A status absent from
    # this mapping makes `raise_for_response()` raise `BadResponse`.
    response_has_content = {
        httplib.CREATED:           True,
        httplib.MOVED_PERMANENTLY: True,
        httplib.FOUND:             True,
        httplib.OK:                True,
        httplib.NOT_MODIFIED:      True,
        httplib.NO_CONTENT:        False,
    }

    # Statuses whose responses must include a location header; the header
    # named here is required by `raise_for_response()` and consumed by
    # `update_from_response()`.
    location_headers = {
        httplib.CREATED:           'Location',
        httplib.MOVED_PERMANENTLY: 'Location',
        httplib.FOUND:             'Location',
    }

    # Acceptable response body types; also sent as the `Accept` header and
    # used as the `Content-Type` when posting.
    content_types = ('application/json',)

    class NotFound(httplib.HTTPException):
        """An HTTPException thrown when the server reports that the
        requested resource was not found."""
        pass

    class Unauthorized(httplib.HTTPException):
        """An HTTPException thrown when the server reports that the
        requested resource is not available through an unauthenticated
        request.

        This exception corresponds to the HTTP status code 401. Thus
        when this exception is received, the caller may need to try
        again using the available authentication credentials.

        """
        pass

    class Forbidden(httplib.HTTPException):
        """An HTTPException thrown when the server reports that the
        client, as authenticated, is not authorized to request the
        requested resource.

        This exception corresponds to the HTTP status code 403. Thus
        when this exception is received, nothing the caller (as
        currently authenticated) can do will make the requested
        resource available.

        """
        pass

    class PreconditionFailed(httplib.HTTPException):
        """An HTTPException thrown when the server reports that some of
        the conditions in a conditional request were not true.

        This exception corresponds to the HTTP status code 412. The
        most common cause of this status is an attempt to ``PUT`` a
        resource that has already changed on the server.

        """
        pass

    class RequestError(httplib.HTTPException):
        """An HTTPException thrown when the server reports an error in
        the client's request.

        This exception corresponds to the HTTP status code 400.

        """
        pass

    class ServerError(httplib.HTTPException):
        """An HTTPException thrown when the server reports an
        unexpected error.

        This exception corresponds to the HTTP status code 500.

        """
        pass

    class BadResponse(httplib.HTTPException):
        """An HTTPException thrown when the client receives some other
        non-success HTTP response."""
        pass

    def __init__(self, **kwargs):
        # No remote location is known until set explicitly or filled in by
        # `update_from_response()`.
        self._location = None
        super(HttpObject, self).__init__(**kwargs)

    @classmethod
    def statefields(cls):
        # `_location` and `_etag` are part of this object's serializable
        # state, in addition to whatever `DataObject` declares.
        return super(HttpObject, cls).statefields() + ['_location', '_etag']

    def get_request(self, url=None, headers=None, **kwargs):
        """Returns the parameters for requesting this `RemoteObject`
        instance as a dictionary of keyword arguments suitable for
        passing to `httplib2.Http.request()`.

        Optional parameter `headers` are also included in the request
        as HTTP headers. Other optional keyword parameters are also
        included as specified.

        """
        if url is None:
            # Default to this instance's known remote location.
            url = self._location
        if headers is None:
            headers = {}
        if 'accept' not in headers:
            # Advertise the body types we know how to decode.
            headers['accept'] = ', '.join(self.content_types)

        # Use 'uri' because httplib2.request does.
        request = dict(uri=url, headers=headers)
        request.update(kwargs)
        return request

    @classmethod
    def raise_for_response(cls, url, response, content):
        """Raises exceptions corresponding to invalid HTTP responses
        that instances of this class can't be updated from.

        Override this method to customize the error handling behavior
        of `RemoteObject` for your target API. For example, if your
        API illegally omits ``Location`` headers from 201 Created
        responses, override this method to check for and allow them.

        """
        # Turn exceptional httplib2 responses into exceptions.
        classname = cls.__name__
        if response.status == httplib.NOT_FOUND:
            raise cls.NotFound('No such %s %s' % (classname, url))
        if response.status == httplib.UNAUTHORIZED:
            raise cls.Unauthorized('Not authorized to fetch %s %s' % (classname, url))
        if response.status == httplib.FORBIDDEN:
            raise cls.Forbidden('Forbidden from fetching %s %s' % (classname, url))
        if response.status == httplib.PRECONDITION_FAILED:
            raise cls.PreconditionFailed('Precondition failed for %s request to %s' % (classname, url))

        if response.status in (httplib.INTERNAL_SERVER_ERROR, httplib.BAD_REQUEST):
            if response.status == httplib.BAD_REQUEST:
                err_cls = cls.RequestError
            else:
                err_cls = cls.ServerError

            # Pull out an error if we can.
            # Only plain-text bodies are mined for an error message; the
            # first line of the body is attached to the exception as
            # `response_error`.
            content_type = response.get('content-type', '').split(';', 1)[0].strip()
            if content_type == 'text/plain':
                error = content.split('\n', 2)[0]
                exc = err_cls('%d %s requesting %s %s: %s'
                    % (response.status, response.reason, classname, url,
                       error))
                exc.response_error = error
                raise exc

            raise err_cls('%d %s requesting %s %s'
                % (response.status, response.reason, classname, url))

        try:
            response_has_content = cls.response_has_content[response.status]
        except KeyError:
            # we only expect the statuses that we know do or don't have content
            raise cls.BadResponse(
                'Unexpected response requesting %s %s: %d %s'
                % (classname, url, response.status, response.reason))

        try:
            location_header = cls.location_headers[response.status]
        except KeyError:
            pass
        else:
            # This status demands a location header; its absence is a
            # protocol violation by the server.
            if location_header.lower() not in response:
                raise cls.BadResponse(
                    "%r header missing from %d %s response requesting %s %s"
                    % (location_header, response.status, response.reason,
                       classname, url))

        if not response_has_content:
            # then there's no content-type either, so we're done
            return

        # check that the response body was json
        content_type = response.get('content-type', '').split(';', 1)[0].strip()
        if content_type not in cls.content_types:
            raise cls.BadResponse(
                'Bad response fetching %s %s: content-type %s is not an expected type'
                % (classname, url, response.get('content-type')))

    def update_from_response(self, url, response, content):
        """Adds the content of this HTTP response and message body to
        this `RemoteObject` instance.

        Use `update_from_response()` only when you would use
        `DataObject.update_from_dict()`: when decoding outside content
        (in this case an HTTP response) into an existing
        `RemoteObject` instance.

        If the response is not a successful response from which the
        `RemoteObject` instance can be updated, an appropriate
        exception will be raised (as determined by the instance's
        `raise_for_response()` method).

        If the response includes a new location URL in the appropriate
        header (depending on the response status), the location of the
        `RemoteObject` instance is updated as well.

        """
        self.raise_for_response(url, response, content)

        try:
            data = json.loads(content)
        except UnicodeDecodeError:
            # Retry with the lenient decoder for malformed/mis-encoded bodies.
            data = json.loads(content, cls=ForgivingDecoder)
        self.update_from_dict(data)

        location_header = self.location_headers.get(response.status)
        if location_header is None:
            # No redirect/creation header applies, so the request URL is
            # this object's canonical location.
            self._location = url
        else:
            self._location = response[location_header.lower()]

        # Remember the validator for later conditional requests.
        if 'etag' in response:
            self._etag = response['etag']

    @classmethod
    def get(cls, url, http=None, **kwargs):
        """Fetches a new `RemoteObject` instance from a URL.

        Parameter `url` is the URL from which the object should be
        requested. Optional parameter `http` is the user agent object
        to use for fetching. `http` should be compatible with
        `httplib2.Http` instances.

        """
        self = cls()
        request = self.get_request(url=url, **kwargs)
        if http is None:
            http = userAgent
        response, content = http.request(**request)
        self.update_from_response(url, response, content)
        return self

    def post(self, obj, http=None):
        """Add another `RemoteObject` to this remote resource through
        an HTTP ``POST`` request.

        Parameter `obj` is a `RemoteObject` instance to save to this
        instance's resource. For example, this (`self`) may be a
        collection to which you want to post an asset (`obj`).

        Optional parameter `http` is the user agent object to use for
        posting. `http` should be compatible with `httplib2.Http`
        objects.

        """
        if getattr(self, '_location', None) is None:
            raise ValueError('Cannot add %r to %r with no URL to POST to'
                % (obj, self))

        # `omit_nulls` drops None-valued fields from the serialized body.
        body = json.dumps(obj.to_dict(), default=omit_nulls)

        headers = {'content-type': self.content_types[0]}

        request = obj.get_request(url=self._location, method='POST',
            body=body, headers=headers)
        if http is None:
            http = userAgent
        response, content = http.request(**request)
        # NOTE(review): the source chunk is truncated here; the remainder
        # of `post()` is not visible in this view.
<reponame>ouyang-w-19/decogo # NLP written by GAMS Convert at 04/21/18 13:51:14 # # Equation counts # Total E G L N X C B # 166 166 0 0 0 0 0 0 # # Variable counts # x b i s1s s2s sc si # Total cont binary integer sos1 sos2 scont sint # 317 317 0 0 0 0 0 0 # FX 53 53 0 0 0 0 0 0 # # Nonzero counts # Total const NL DLL # 821 595 226 0 # # Reformulation has removed 1 variable and 1 equation from pyomo.environ import * model = m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=0.714277270296959) m.x2 = Var(within=Reals,bounds=(None,None),initialize=0.213455359357076) m.x3 = Var(within=Reals,bounds=(None,None),initialize=-0.000257460042516337) m.x4 = Var(within=Reals,bounds=(None,None),initialize=0.267446625046681) m.x5 = Var(within=Reals,bounds=(None,None),initialize=0.428981457932639) m.x6 = Var(within=Reals,bounds=(None,None),initialize=0.706421402256235) m.x7 = Var(within=Reals,bounds=(None,None),initialize=1.23179277222266) m.x8 = Var(within=Reals,bounds=(None,None),initialize=1.1923022297969) m.x9 = Var(within=Reals,bounds=(None,None),initialize=1) m.x10 = Var(within=Reals,bounds=(None,None),initialize=0.531271066405917) m.x11 = Var(within=Reals,bounds=(None,None),initialize=0.37852116602787) m.x12 = Var(within=Reals,bounds=(None,None),initialize=0.0259822061255019) m.x13 = Var(within=Reals,bounds=(None,None),initialize=0.613866884603052) m.x14 = Var(within=Reals,bounds=(None,None),initialize=0.912812569152467) m.x15 = Var(within=Reals,bounds=(None,None),initialize=0.0233052515549957) m.x16 = Var(within=Reals,bounds=(None,None),initialize=0.0359433346141142) m.x17 = Var(within=Reals,bounds=(None,None),initialize=0.0397474756614438) m.x18 = Var(within=Reals,bounds=(None,None),initialize=0.0172169283352343) m.x19 = Var(within=Reals,bounds=(None,None),initialize=0.00761194936907785) m.x20 = Var(within=Reals,bounds=(None,None),initialize=0.0456959504315114) m.x21 = Var(within=Reals,bounds=(None,None),initialize=0.0141724551070975) m.x22 = 
Var(within=Reals,bounds=(None,None),initialize=0.307728859298738) m.x23 = Var(within=Reals,bounds=(None,None),initialize=0.0414914804160212) m.x24 = Var(within=Reals,bounds=(None,None),initialize=0.0659507832795914) m.x25 = Var(within=Reals,bounds=(None,None),initialize=-0.280822769860641) m.x26 = Var(within=Reals,bounds=(None,None),initialize=-0.192302229796904) m.x27 = Var(within=Reals,bounds=(None,None),initialize=0.388881181040466) m.x28 = Var(within=Reals,bounds=(None,None),initialize=0.268505801367806) m.x29 = Var(within=Reals,bounds=(0,0),initialize=0) m.x30 = Var(within=Reals,bounds=(None,None),initialize=14.827424) m.x31 = Var(within=Reals,bounds=(0,0),initialize=0) m.x32 = Var(within=Reals,bounds=(0,0),initialize=0) m.x33 = Var(within=Reals,bounds=(None,None),initialize=2.101049) m.x34 = Var(within=Reals,bounds=(None,None),initialize=-0.000327) m.x35 = Var(within=Reals,bounds=(0,0),initialize=0) m.x36 = Var(within=Reals,bounds=(0,0),initialize=0) m.x37 = Var(within=Reals,bounds=(None,None),initialize=1.488157) m.x38 = Var(within=Reals,bounds=(None,None),initialize=7.917504) m.x39 = Var(within=Reals,bounds=(0,0),initialize=0) m.x40 = Var(within=Reals,bounds=(0,0),initialize=0) m.x41 = Var(within=Reals,bounds=(0,0),initialize=0) m.x42 = Var(within=Reals,bounds=(None,None),initialize=6.953332) m.x43 = Var(within=Reals,bounds=(None,None),initialize=1.5645) m.x44 = Var(within=Reals,bounds=(None,None),initialize=2.5185) m.x45 = Var(within=Reals,bounds=(None,None),initialize=2.597798) m.x46 = Var(within=Reals,bounds=(0,0),initialize=0) m.x47 = Var(within=Reals,bounds=(None,None),initialize=9.805414) m.x48 = Var(within=Reals,bounds=(0,0),initialize=0) m.x49 = Var(within=Reals,bounds=(0,0),initialize=0) m.x50 = Var(within=Reals,bounds=(0,0),initialize=0) m.x51 = Var(within=Reals,bounds=(0,0),initialize=0) m.x52 = Var(within=Reals,bounds=(0,0),initialize=0) m.x53 = Var(within=Reals,bounds=(0,0),initialize=0) m.x54 = Var(within=Reals,bounds=(0,0),initialize=0) m.x55 
= Var(within=Reals,bounds=(0,0),initialize=0) m.x56 = Var(within=Reals,bounds=(0,0),initialize=0) m.x57 = Var(within=Reals,bounds=(0,0),initialize=0) m.x58 = Var(within=Reals,bounds=(None,None),initialize=3.699706) m.x59 = Var(within=Reals,bounds=(0,0),initialize=0) m.x60 = Var(within=Reals,bounds=(0,0),initialize=0) m.x61 = Var(within=Reals,bounds=(None,None),initialize=0.033) m.x62 = Var(within=Reals,bounds=(0,0),initialize=0) m.x63 = Var(within=Reals,bounds=(0,0),initialize=0) m.x64 = Var(within=Reals,bounds=(0,0),initialize=0) m.x65 = Var(within=Reals,bounds=(0,0),initialize=0) m.x66 = Var(within=Reals,bounds=(0,0),initialize=0) m.x67 = Var(within=Reals,bounds=(None,None),initialize=6) m.x68 = Var(within=Reals,bounds=(None,None),initialize=3.3) m.x69 = Var(within=Reals,bounds=(0,0),initialize=0) m.x70 = Var(within=Reals,bounds=(None,None),initialize=0.0296) m.x71 = Var(within=Reals,bounds=(0,0),initialize=0) m.x72 = Var(within=Reals,bounds=(0,0),initialize=0) m.x73 = Var(within=Reals,bounds=(None,None),initialize=0.2) m.x74 = Var(within=Reals,bounds=(None,None),initialize=0.7336) m.x75 = Var(within=Reals,bounds=(None,None),initialize=0.3574) m.x76 = Var(within=Reals,bounds=(None,None),initialize=0.0744) m.x77 = Var(within=Reals,bounds=(None,None),initialize=0.1652) m.x78 = Var(within=Reals,bounds=(None,None),initialize=0.1395) m.x79 = Var(within=Reals,bounds=(0,0),initialize=0) m.x80 = Var(within=Reals,bounds=(0,0),initialize=0) m.x81 = Var(within=Reals,bounds=(0,0),initialize=0) m.x82 = Var(within=Reals,bounds=(0,0),initialize=0) m.x83 = Var(within=Reals,bounds=(0,0),initialize=0) m.x84 = Var(within=Reals,bounds=(0,0),initialize=0) m.x85 = Var(within=Reals,bounds=(0,0),initialize=0) m.x86 = Var(within=Reals,bounds=(0,0),initialize=0) m.x87 = Var(within=Reals,bounds=(0,0),initialize=0) m.x88 = Var(within=Reals,bounds=(0,0),initialize=0) m.x89 = Var(within=Reals,bounds=(0,0),initialize=0) m.x90 = Var(within=Reals,bounds=(0,0),initialize=0) m.x91 = 
Var(within=Reals,bounds=(None,None),initialize=1.7123) m.x92 = Var(within=Reals,bounds=(0,0),initialize=0) m.x93 = Var(within=Reals,bounds=(0,0),initialize=0) m.x94 = Var(within=Reals,bounds=(0,0),initialize=0) m.x95 = Var(within=Reals,bounds=(None,None),initialize=0.15) m.x96 = Var(within=Reals,bounds=(None,None),initialize=0.649156) m.x97 = Var(within=Reals,bounds=(None,None),initialize=-0.356673) m.x98 = Var(within=Reals,bounds=(None,None),initialize=-0.4062) m.x99 = Var(within=Reals,bounds=(0,0),initialize=0) m.x100 = Var(within=Reals,bounds=(None,None),initialize=2.163857) m.x101 = Var(within=Reals,bounds=(0,0),initialize=0) m.x102 = Var(within=Reals,bounds=(None,None),initialize=5.573815) m.x103 = Var(within=Reals,bounds=(0,0),initialize=0) m.x104 = Var(within=Reals,bounds=(0,0),initialize=0) m.x105 = Var(within=Reals,bounds=(0,0),initialize=0) m.x106 = Var(within=Reals,bounds=(0,0),initialize=0) m.x107 = Var(within=Reals,bounds=(0,0),initialize=0) m.x108 = Var(within=Reals,bounds=(0,0),initialize=0) m.x109 = Var(within=Reals,bounds=(0,0),initialize=0) m.x110 = Var(within=Reals,bounds=(None,None),initialize=9.805414) m.x111 = Var(within=Reals,bounds=(None,None),initialize=10.896741) m.x112 = Var(within=Reals,bounds=(None,None),initialize=18.4364105) m.x113 = Var(within=Reals,bounds=(None,None),initialize=21.1551365) m.x114 = Var(within=Reals,bounds=(None,None),initialize=9.78976) m.x115 = Var(within=Reals,bounds=(None,None),initialize=3.673953) m.x116 = Var(within=Reals,bounds=(None,None),initialize=9.6863185) m.x117 = Var(within=Reals,bounds=(None,None),initialize=1.3701) m.x118 = Var(within=Reals,bounds=(None,None),initialize=1.9123) m.x119 = Var(within=Reals,bounds=(None,None),initialize=2.398969) m.x120 = Var(within=Reals,bounds=(None,None),initialize=5.5690645) m.x121 = Var(within=Reals,bounds=(None,None),initialize=0) m.x122 = Var(within=Reals,bounds=(None,None),initialize=0) m.x123 = Var(within=Reals,bounds=(None,None),initialize=0) m.x124 = 
Var(within=Reals,bounds=(None,None),initialize=0) m.x125 = Var(within=Reals,bounds=(None,None),initialize=0) m.x126 = Var(within=Reals,bounds=(None,None),initialize=0) m.x127 = Var(within=Reals,bounds=(None,None),initialize=0) m.x128 = Var(within=Reals,bounds=(None,None),initialize=0) m.x129 = Var(within=Reals,bounds=(None,None),initialize=0) m.x130 = Var(within=Reals,bounds=(None,None),initialize=0) m.x131 = Var(within=Reals,bounds=(None,None),initialize=0) m.x132 = Var(within=Reals,bounds=(None,None),initialize=0) m.x133 = Var(within=Reals,bounds=(None,None),initialize=0) m.x134 = Var(within=Reals,bounds=(None,None),initialize=0) m.x135 = Var(within=Reals,bounds=(None,None),initialize=0) m.x136 = Var(within=Reals,bounds=(None,None),initialize=0) m.x137 = Var(within=Reals,bounds=(None,None),initialize=0) m.x138 = Var(within=Reals,bounds=(None,None),initialize=0) m.x139 = Var(within=Reals,bounds=(None,None),initialize=0) m.x140 = Var(within=Reals,bounds=(None,None),initialize=0) m.x141 = Var(within=Reals,bounds=(None,None),initialize=0) m.x142 = Var(within=Reals,bounds=(None,None),initialize=0) m.x143 = Var(within=Reals,bounds=(None,None),initialize=0) m.x144 = Var(within=Reals,bounds=(None,None),initialize=0) m.x145 = Var(within=Reals,bounds=(None,None),initialize=0) m.x146 = Var(within=Reals,bounds=(None,None),initialize=0) m.x147 = Var(within=Reals,bounds=(None,None),initialize=0) m.x148 = Var(within=Reals,bounds=(None,None),initialize=0) m.x149 = Var(within=Reals,bounds=(None,None),initialize=0) m.x150 = Var(within=Reals,bounds=(None,None),initialize=0) m.x151 = Var(within=Reals,bounds=(None,None),initialize=0) m.x152 = Var(within=Reals,bounds=(None,None),initialize=0) m.x153 = Var(within=Reals,bounds=(None,None),initialize=0) m.x154 = Var(within=Reals,bounds=(None,None),initialize=0) m.x155 = Var(within=Reals,bounds=(None,None),initialize=0) m.x156 = Var(within=Reals,bounds=(None,None),initialize=0) m.x157 = Var(within=Reals,bounds=(None,None),initialize=0) 
m.x158 = Var(within=Reals,bounds=(None,None),initialize=0) m.x159 = Var(within=Reals,bounds=(None,None),initialize=0) m.x160 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x161 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x162 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x163 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x164 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x165 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x166 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x167 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x168 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x169 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x170 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x171 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x172 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x173 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x174 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x175 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x176 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x177 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x178 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x179 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x180 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x181 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x182 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x183 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x184 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x185 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x186 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x187 = 
Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x188 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x189 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x190 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x191 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x192 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x193 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x194 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x195 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x196 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x197 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x198 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x199 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x200 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x201 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x202 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x203 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x204 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x205 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x206 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x207 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x208 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x209 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x210 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x211 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x212 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x213 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x214 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x215 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x216 
= Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x217 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x218 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x219 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x220 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x221 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x222 = Var(within=Reals,bounds=(0,1),initialize=0.142857142857143) m.x223 = Var(within=Reals,bounds=(0,1),initialize=0.00617283950617284) m.x224 = Var(within=Reals,bounds=(0,1),initialize=0.197530864197531) m.x225 = Var(within=Reals,bounds=(0,1),initialize=0.592592592592593) m.x226 = Var(within=Reals,bounds=(0,1),initialize=0.197530864197531) m.x227 = Var(within=Reals,bounds=(0,1),initialize=0.00617283950617284) m.x228 = Var(within=Reals,bounds=(0,1),initialize=0.00617283950617284) m.x229 = Var(within=Reals,bounds=(0,1),initialize=0.197530864197531) m.x230 = Var(within=Reals,bounds=(0,1),initialize=0.592592592592593) m.x231 = Var(within=Reals,bounds=(0,1),initialize=0.197530864197531) m.x232 = Var(within=Reals,bounds=(0,1),initialize=0.00617283950617284) m.x233 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x234 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x235 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x236 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x237 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x238 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x239 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x240 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x241 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x242 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x243 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x244 = 
Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x245 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x246 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x247 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x248 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x249 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x250 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x251 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x252 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x253 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x254 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x255 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x256 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x257 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x258 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x259 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x260 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x261 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x262 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x263 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x264 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x265 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x266 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x267 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x268 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x269 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x270 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x271 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x272 = 
Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x273 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x274 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x275 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x276 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x277 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x278 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x279 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x280 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x281 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x282 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x283 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x284 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x285 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x286 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x287 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x288 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x289 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x290 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x291 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x292 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x293 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x294 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x295 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x296 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x297 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x298 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x299 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x300 = 
Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x301 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x302 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x303 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x304 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x305 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x306 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x307 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x308 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x309 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x310 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x311 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x312 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x313 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x314 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.x315 = Var(within=Reals,bounds=(0,1),initialize=0.888888888888889) m.x316 = Var(within=Reals,bounds=(0,1),initialize=0.0555555555555556) m.obj = Objective(expr=Centropy(m.x233,0.0555555555555556) + Centropy(m.x234,0.888888888888889) + Centropy(m.x235, 0.0555555555555556) + Centropy(m.x236,0.0555555555555556) + Centropy(m.x237,0.888888888888889) + Centropy(m.x238,0.0555555555555556) + Centropy(m.x239,0.0555555555555556) + Centropy(m.x240, 0.888888888888889) + Centropy(m.x241,0.0555555555555556) + Centropy(m.x242,0.0555555555555556) + Centropy(m.x243,0.888888888888889) + Centropy(m.x244,0.0555555555555556) + Centropy(m.x245, 0.0555555555555556) + Centropy(m.x246,0.888888888888889) + Centropy(m.x247,0.0555555555555556) + Centropy(m.x248,0.0555555555555556) + Centropy(m.x249,0.888888888888889) + Centropy(m.x250, 0.0555555555555556) + Centropy(m.x251,0.0555555555555556) + Centropy(m.x252,0.888888888888889) + Centropy(m.x253,0.0555555555555556) + 
Centropy(m.x254,0.0555555555555556) + Centropy(m.x255, 0.888888888888889) + Centropy(m.x256,0.0555555555555556) + Centropy(m.x257,0.0555555555555556) + Centropy(m.x258,0.888888888888889) + Centropy(m.x259,0.0555555555555556) + Centropy(m.x260, 0.0555555555555556) + Centropy(m.x261,0.888888888888889) + Centropy(m.x262,0.0555555555555556) + Centropy(m.x263,0.0555555555555556) + Centropy(m.x264,0.888888888888889) + Centropy(m.x265, 0.0555555555555556) + Centropy(m.x266,0.0555555555555556) + Centropy(m.x267,0.888888888888889) + Centropy(m.x268,0.0555555555555556) + Centropy(m.x269,0.0555555555555556) + Centropy(m.x270, 0.888888888888889) + Centropy(m.x271,0.0555555555555556) + Centropy(m.x272,0.0555555555555556) + Centropy(m.x273,0.888888888888889) + Centropy(m.x274,0.0555555555555556) + Centropy(m.x275, 0.0555555555555556) + Centropy(m.x276,0.888888888888889) + Centropy(m.x277,0.0555555555555556) + Centropy(m.x278,0.0555555555555556) + Centropy(m.x279,0.888888888888889) + Centropy(m.x280, 0.0555555555555556) + Centropy(m.x281,0.0555555555555556) + Centropy(m.x282,0.888888888888889) + Centropy(m.x283,0.0555555555555556) + Centropy(m.x284,0.0555555555555556) + Centropy(m.x285, 0.888888888888889) + Centropy(m.x286,0.0555555555555556) + Centropy(m.x287,0.0555555555555556) + Centropy(m.x288,0.888888888888889) + Centropy(m.x289,0.0555555555555556) + Centropy(m.x290, 0.0555555555555556) + Centropy(m.x291,0.888888888888889) + Centropy(m.x292,0.0555555555555556) + Centropy(m.x293,0.0555555555555556) + Centropy(m.x294,0.888888888888889) + Centropy(m.x295, 0.0555555555555556) + Centropy(m.x296,0.0555555555555556) + Centropy(m.x297,0.888888888888889) + Centropy(m.x298,0.0555555555555556) + Centropy(m.x299,0.0555555555555556) + Centropy(m.x300, 0.888888888888889) + Centropy(m.x301,0.0555555555555556) + Centropy(m.x302,0.0555555555555556) + Centropy(m.x303,0.888888888888889) + Centropy(m.x304,0.0555555555555556) + Centropy(m.x305, 0.0555555555555556) + 
Centropy(m.x306,0.888888888888889) + Centropy(m.x307,0.0555555555555556) + Centropy(m.x308,0.0555555555555556) + Centropy(m.x309,0.888888888888889) + Centropy(m.x310, 0.0555555555555556) + Centropy(m.x311,0.0555555555555556) + Centropy(m.x312,0.888888888888889) + Centropy(m.x313,0.0555555555555556) + Centropy(m.x314,0.0555555555555556) + Centropy(m.x315, 0.888888888888889) + Centropy(m.x316,0.0555555555555556) + Centropy(m.x160,0.142857142857143) + Centropy(m.x161,0.142857142857143) + Centropy(m.x162,0.142857142857143) + Centropy(m.x163, 0.142857142857143) + Centropy(m.x164,0.142857142857143) + Centropy(m.x165,0.142857142857143) + Centropy(m.x166,0.142857142857143) + Centropy(m.x167,0.142857142857143) + Centropy(m.x168, 0.142857142857143) + Centropy(m.x169,0.142857142857143) + Centropy(m.x170,0.142857142857143) + Centropy(m.x171,0.142857142857143) + Centropy(m.x172,0.142857142857143) + Centropy(m.x173, 0.142857142857143) + Centropy(m.x174,0.142857142857143) + Centropy(m.x175,0.142857142857143) + Centropy(m.x176,0.142857142857143) + Centropy(m.x177,0.142857142857143) + Centropy(m.x178, 0.142857142857143) + Centropy(m.x179,0.142857142857143) + Centropy(m.x180,0.142857142857143) + Centropy(m.x181,0.142857142857143) + Centropy(m.x182,0.142857142857143) + Centropy(m.x183, 0.142857142857143) + Centropy(m.x184,0.142857142857143) + Centropy(m.x185,0.142857142857143) + Centropy(m.x186,0.142857142857143) + Centropy(m.x187,0.142857142857143) + Centropy(m.x188, 0.142857142857143) + Centropy(m.x189,0.142857142857143) + Centropy(m.x190,0.142857142857143) + Centropy(m.x191,0.142857142857143) + Centropy(m.x192,0.142857142857143) + Centropy(m.x193, 0.142857142857143) + Centropy(m.x194,0.142857142857143) + Centropy(m.x195,0.142857142857143) + Centropy(m.x196,0.142857142857143) + Centropy(m.x197,0.142857142857143) + Centropy(m.x198, 0.142857142857143) + Centropy(m.x199,0.142857142857143) + Centropy(m.x200,0.142857142857143) + Centropy(m.x201,0.142857142857143) + 
Centropy(m.x202,0.142857142857143) + Centropy(m.x203, 0.142857142857143) + Centropy(m.x204,0.142857142857143) + Centropy(m.x205,0.142857142857143) + Centropy(m.x206,0.142857142857143) + Centropy(m.x207,0.142857142857143) + Centropy(m.x208, 0.142857142857143) + Centropy(m.x209,0.142857142857143) + Centropy(m.x210,0.142857142857143) + Centropy(m.x211,0.142857142857143) + Centropy(m.x212,0.142857142857143) + Centropy(m.x213, 0.142857142857143) + Centropy(m.x214,0.142857142857143) + Centropy(m.x215,0.142857142857143) + Centropy(m.x216,0.142857142857143) + Centropy(m.x217,0.142857142857143) + Centropy(m.x218, 0.142857142857143) + Centropy(m.x219,0.142857142857143) + Centropy(m.x220,0.142857142857143) + Centropy(m.x221,0.142857142857143) + Centropy(m.x222,0.142857142857143) + Centropy(m.x223, 0.00617283950617284) + Centropy(m.x224,0.197530864197531) + Centropy(m.x225,0.592592592592593) + Centropy(m.x226,0.197530864197531) + Centropy(m.x227,0.00617283950617284) + Centropy(m.x228, 0.00617283950617284) + Centropy(m.x229,0.197530864197531) + Centropy(m.x230,0.592592592592593) + Centropy(m.x231,0.197530864197531) + Centropy(m.x232,0.00617283950617284), sense=minimize) m.c1 = Constraint(expr= m.x112 - m.x121 == 18.4364105) m.c2 = Constraint(expr= m.x113 - m.x122 == 21.1551365) m.c3 = Constraint(expr= m.x114 - m.x123 == 9.78976) m.c4 = Constraint(expr= m.x115 - m.x124 == 3.673953) m.c5 = Constraint(expr= m.x116 - m.x125 == 9.6863185) m.c6 = Constraint(expr= m.x117 - m.x126 == 1.3701) m.c7 = Constraint(expr= m.x118 - m.x127 == 1.9123) m.c8 = Constraint(expr= m.x119 - m.x128 == 2.398969) m.c9 = Constraint(expr= m.x120 - m.x129 == 5.5690645) m.c10 = Constraint(expr= m.x29 + m.x30 + m.x31 + m.x32 + m.x33 + m.x34 + m.x35 + m.x36 + m.x37 - m.x112 == 0) m.c11 = Constraint(expr= m.x38 + m.x39 + m.x40 + m.x41 + m.x42 + m.x43 + m.x44 + m.x45 + m.x46 - m.x113 == 0) m.c12 = Constraint(expr= m.x47 + m.x48 + m.x49 + m.x50 + m.x51 + m.x52 + m.x53 + m.x54 + m.x55 - m.x114 == 0) m.c13 = 
Constraint(expr= m.x56 + m.x57 + m.x58 + m.x59 + m.x60 + m.x61 + m.x62 + m.x63 + m.x64 - m.x115 == 0) m.c14 = Constraint(expr= m.x65 + m.x66 + m.x67 + m.x68 + m.x69 + m.x70 + m.x71 + m.x72 + m.x73 - m.x116 == 0) m.c15 = Constraint(expr= m.x74
<gh_stars>1-10 ''' Extensions to SMAP L4C (and calibration) to support soil respiration enhancements related to improved soil hydrology modeling. ''' import os import pickle import warnings import h5py import numpy as np import matplotlib from functools import partial from matplotlib import pyplot from pyl4c import suppress_warnings from pyl4c.apps.calibration.main import CLI, CONFIG from pyl4c.science import arrhenius from pyl4c.stats import linear_constraint from pyl4c.apps.calibration import GenericOptimization, BPLUT, report_fit_stats, solve_least_squares from pyl4c.data.fixtures import restore_bplut # All DEPTHS must be positive DEPTHS = np.array((0.05, 0.15, 0.35, 0.75, 1.5, 3.0))\ .reshape((6,1)) # meters # Constrained optimization bounds OPT_BOUNDS = { 'reco_z': ( # CUE, tsoil, smsf0, smsf1, k_depth_decay np.array((0.2, 1, -30, 25, 0.3)), np.array((0.7, 800, 24.9, 100, 1.0))), 'reco_z_power': ( # CUE, tsoil, smsf0, smsf1, z_tau_a, z_tau_b np.array((0.2, 1, -30, 25, 0.01, 0.01)), np.array((0.7, 800, 24.9, 100, 1.00, 1.00))), # After Davidson et al. (2012)... # Median d_gas in completely dry soil conditions (soil VWC < 5th # percentile): 3.82 'reco_o2_limit': ( # CUE, tsoil, smsf0, smsf1, k_depth_decay, d_gas, km_oxy np.array((0.2, 1, -30, 25, 0.05, 3, 0.01)), np.array((0.7, 800, 24.9, 100, 1.50, 5, 0.15))), } NEW_PARAMETERS = ('k_depth_decay', 'd_gas', 'km_oxy', 'z_tau_a', 'z_tau_b') L4C_PARAMETERS = list(BPLUT._labels) L4C_PARAMETERS.extend(NEW_PARAMETERS) class StratifiedSoilCalibrationCLI(CLI): ''' Command line interface for calibrating L4C with a vertically stratified soil organic carbon (SOC) model. 
Get started by creating a scratch dataset: python hydrology.py setup Optionally, filter the tower GPP and/or RECO time series: python hydrology.py filter-all gpp <window_size> python hydrology.py filter-all reco <window_size> To optimize the RECO parameters for the model with vertically resolved soil organic carbon, specify a PFT class: python hydrology.py pft <pft> tune-reco To optimize the RECO parameters for the vertically resolved model that also includes an O2 diffusion limitation: python hydrology.py pft <pft> tune-reco-o2-limit ''' _model_name = 'reco_z' _parameters = { 'gpp': ( 'LUE', 'tmin0', 'tmin1', 'vpd0', 'vpd1', 'smrz0', 'smrz1', 'ft0'), 'reco_z': ( 'CUE', 'tsoil', 'smsf0', 'smsf1', 'k_depth_decay'), 'reco_z_power': ( 'CUE', 'tsoil', 'smsf0', 'smsf1', 'z_tau_a', 'z_tau_b'), 'reco_o2_limit': ( 'CUE', 'tsoil', 'smsf0', 'smsf1', 'k_depth_decay', 'd_gas', 'km_oxy'), } _path_to_temp_profile = '/home/arthur/Downloads/L4C_experiments/L4C-Phenology/L4_SM_gph_NRv8-3_profile_at_356_tower_sites.h5' _path_to_sm_profile = '/home/arthur/Downloads/L4C_experiments/L4C-Phenology/L4_C_NRv8-3_soil_moisture_profiles_simulated_at_356_tower_sites.h5' def __init__( self, config = CONFIG, pft = None, start = None, end = None, debug = True, use_legacy_pft = True, n_layers = 6): super().__init__( config = config, pft = pft, start = start, end = end, debug = debug, use_legacy_pft = use_legacy_pft) self.depths = DEPTHS[0:n_layers] print('Working with layer depths: %s' % ', '.join(map(lambda v: '%.3f' % v, self.depths))) self.n_layers = n_layers # (Re-)creates the BPLUT store using the correct (expanded) list of # parameter labels self._init_bplut(labels = L4C_PARAMETERS) @suppress_warnings def _configure(self, q_rh, q_k, fixed, model = 'reco_z'): 'Loads driver data, sets starting parameters for RECO calibration' assert self._is_setup, 'Must run setup first' assert q_rh >= 0 and q_rh <= 100 and q_k >= 0 and q_k <= 100,\ 'Invalid setting for "q_rh" or "q_k" parameters' params = 
self._parameters[model] if fixed is not None: assert all(p in params for p in fixed),\ 'Arguments to "fixed" should be in: [%s]' % ', '.join(params) init_params = self.bplut.flat(self._pft, params) # Read in data, with optional subsetting of the time axis t0 = self._time_start if self._time_start is not None else 0 t1 = self._time_end if self._time_end is not None else self._nsteps self._drivers = [] # Open the soil moisture, temperature driver datasets with h5py.File(self._path_to_temp_profile, 'r') as hdf: # Calculate extent of soil layers, given bedrock depth bedrock = hdf['LAND_MODEL_CONSTANTS/depth_to_bedrock_m'][self._sites] self._layer_mask = self.depths < bedrock self._porosity = hdf['LAND_MODEL_CONSTANTS/porosity'][self._sites] temp_profile = [] temp_profile.append(hdf['L4SM_DAILY_MEAN/surface_temp'][t0:t1,self._sites]) for i in range(1, self.n_layers): temp_profile.append( hdf['L4SM_DAILY_MEAN/soil_temp_layer%d' % i][t0:t1,self._sites]) temp_profile = np.stack(temp_profile) self._drivers.append(temp_profile) # Convert soil VWC to wetness (%) if os.path.basename(self._path_to_sm_profile).split('.').pop() == 'h5': with h5py.File(self._path_to_sm_profile, 'r') as hdf: soil_m = 100 * np.divide( hdf['soil_moisture_vwc'][:,t0:t1,self._sites], self._porosity) # Clip f(SM) response, as wetness values might be unrealistic # given problems in ice-filled soil layers soil_m[soil_m > 100] = 100 self._drivers.append(soil_m) # Add soil moisture in VWC if model == 'reco_o2_limit': self._drivers.append(hdf['soil_moisture_vwc'][:,t0:t1,self._sites]) elif os.path.basename(self._path_to_sm_profile).split('.').pop() == 'pickle': with open(self._path_to_sm_profile, 'rb') as file: profiles = pickle.load(file) soil_m = 100 * np.divide( profiles[:,t0:t1,self._sites], self._porosity) # Clip f(SM) response, as wetness values might be unrealistic # given problems in ice-filled soil layers soil_m[soil_m > 100] = 100 self._drivers.append(soil_m) # Add soil moisture in VWC if model == 
'reco_o2_limit': self._drivers.append(profiles[:,t0:t1,self._sites]) # Read in the tower flux data and site weights with h5py.File(self._path_to_scratch, 'r') as hdf: self._gpp_tower = hdf['tower/GPP'][t0:t1,self._sites,:].mean(axis = 2) self._reco_tower = hdf['tower/RECO'][t0:t1,self._sites,:].mean(axis = 2) self._site_weights = hdf['site_weights'][:,self._sites] return init_params def _concentration_O2(self, d_gas, soil_vwc): air_frac_O2 = 0.2095 # Liters of O2 per liter of air (20.95%) return d_gas * air_frac_O2 * np.power(self._porosity - soil_vwc, 4/3) def _k_mult(self, params): 'Calculate K_mult based on current parameters' tsoil, sm = self._drivers # Note that f_z() is NOT included here, because we do not want # cbar() to decline with depth f_tsoil = partial(arrhenius, beta0 = params[1]) f_sm = linear_constraint(params[2], params[3]) return f_tsoil(tsoil) * f_sm(sm) @suppress_warnings def _reco(self, params, q_rh, q_k): 'Modeled ecosystem respiration (RECO) based on current parameters' # Calculate RH as (RECO - RA) or (RECO - (faut * GPP)); ra = ((1 - params[0]) * self._gpp_tower) rh0 = self._reco_tower - ra rh0 = np.where(rh0 < 0, 0, rh0) # Mask out negative RH values # Compute Cbar with globals "q_rh" and "q_k" kmult0 = self._k_mult(params) cbar0 = cbar(rh0, kmult0, q_rh, q_k) # Extinction rate of heterotrophic respiration with depth, due to # factors OTHER THAN temperature, moisture (Koven et al. 2013) f_z = np.exp(-np.abs(self.depths) / params[4]) *\ np.ones(cbar0.shape) # Set RH from layers below the bedrock depth to zero rh = (kmult0 * f_z[:,None,:] * cbar0[:,None,:]).swapaxes(1, 2) rh[~self._layer_mask] = 0 reco0 = ra + rh.swapaxes(1, 2).sum(axis = 0) return reco0 def _tune( self, fit, residuals, init_params, fixed_params, step_sizes, trials, optimize, nlopt): ''' Runs the optimization. 
Parameters ---------- fit : function The function that returns fit values, given parameters residuals : function The function that returns residuals, given parameters init_params : tuple or list or numpy.ndarray fixed_params : tuple or list or numpy.ndarray step_sizes : tuple or list or numpy.ndarray trials : int optimize : bool nlopt : bool ''' # Get bounds for the parameter search bounds = self._bounds( init_params, self._model_name, fixed_params, bounds = OPT_BOUNDS) params = [] params0 = [] scores = [] param_space = np.linspace(bounds[0], bounds[1], 100) assert not np.isnan(init_params).any(),\ 'One or more NaNs were provided as "init_params"' for t in range(0, trials): # If multiple trials, randomize the initial parameter values # and score the model in each trial if optimize and trials > 1: p = param_space.shape[1] # Number of parameters idx = np.random.randint(0, param_space.shape[0], p) init_params = param_space[idx,np.arange(0, p)] params0.append(init_params) if optimize and not nlopt: # Apply constrained, non-linear least-squares optimization # NOTE: arctan loss function doesn't work well here if 'loss' in kwargs.keys(): print('NOTE: Overriding "loss" function specification') kwargs.update({'loss': 'linear'}) solution = solve_least_squares( residuals, init_params, labels = self._parameters[self._model_name], bounds = bounds, **kwargs) fitted = solution.x.tolist() message = solution.message elif optimize and nlopt: opt = GenericOptimization( residuals, bounds, step_size = step_sizes) try: fitted = opt.solve(init_params) except RuntimeError: params.append(None) scores.append(np.inf) print('Error in objective function; restarting...') continue # Try again! 
message = 'Success' else: fitted = [None for i in range(0, len(init_params))] break # Do not iterate through trials if not optimizing # Record the found solution and its goodness-of-fit score params.append(fitted) pred = fit(fitted if optimize else init_params) _, rmse_score, _, _ = self._report_fit( self._reco_tower, pred, self._site_weights, verbose = False) print('[%s/%s] RMSE score of last trial: %.3f' % ( str(t + 1).zfill(2), str(trials).zfill(2), rmse_score)) scores.append(rmse_score) # Select the fit params with the best score if optimize and trials > 1: fitted = params[np.argmin(scores)] init_params = params0[np.argmin(scores)] # Generate and print a report, update the BPLUT parameters self._report( init_params, fitted, self._parameters[self._model_name], 'RECO Optimization') pred = fit(fitted if optimize else init_params) self._report_fit(self._reco_tower, pred, self._site_weights) if optimize: user_prompt = input('Update parameters for PFT=%d? [Y/n] ' % self._pft) do_write = user_prompt == 'Y' if do_write: print('Updating parameters for PFT=%d...' % self._pft) self.bplut.update(self._pft, fitted, self._parameters[self._model_name]) def plot_reco( self, driver, model = 'reco_z', q_rh = 75, q_k = 50, by_depth = True, ylim = None, **kwargs): ''' Plots both the
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\base_interactions.py
# Compiled at: 2020-05-28 20:15:33
# Size of source mod 2**32: 34163 bytes
# NOTE(review): this module is DECOMPILED bytecode (uncompyle6). Statement
# order, parenthesized call shapes like (super().__init__)(...), and the
# trailing "if False: yield None" markers (which keep generator functions
# generators) are decompiler artifacts and must not be "cleaned up" blindly.
import operator
from event_testing.results import TestResult, ExecuteResult
from interactions import ParticipantType
from interactions.aop import AffordanceObjectPair
from interactions.base.immediate_interaction import ImmediateSuperInteraction
from interactions.base.mixer_interaction import MixerInteraction
from interactions.base.super_interaction import SuperInteraction
from interactions.constraints import create_constraint_set, ANYWHERE, Nowhere
from interactions.context import InteractionContext, QueueInsertStrategy
from interactions.interaction_finisher import FinishingType
from interactions.join_liability import JOIN_INTERACTION_LIABILITY, JoinInteractionLiability
from sims4.localization import TunableLocalizedStringFactory, LocalizationHelperTuning
from sims4.tuning.tunable import TunableReference, Tunable, TunableList, TunableTuple, OptionalTunable, TunableEnumEntry, TunableVariant
from sims4.tuning.tunable_base import GroupNames
from sims4.utils import flexmethod, classproperty
from singletons import DEFAULT
from ui.ui_dialog_generic import UiDialogTextInputOkCancel, UiDialogTextInputOk
import element_utils, interactions, services, sims4.log, sims4.math, sims4.resources
logger = sims4.log.Logger('Interactions')

# Base for interactions that dynamically wrap ("proxy") another affordance.
class ProxyInteraction(SuperInteraction):
    INSTANCE_SUBCLASSES_ONLY = True

    @classproperty
    def proxy_name(cls):
        # Prefix used when naming generated proxy subclasses
        return '[Proxy]'

    @classmethod
    def generate(cls, proxied_affordance):
        # Builds a one-off subclass of cls bound to proxied_affordance

        class ProxyInstance(cls):
            INSTANCE_SUBCLASSES_ONLY = True

            @classproperty
            def proxied_affordance(cls):
                return proxied_affordance

            @classmethod
            def get_interaction_type(cls):
                return proxied_affordance.get_interaction_type()

        ProxyInstance.__name__ = cls.proxy_name + proxied_affordance.__name__
        return ProxyInstance

    @classmethod
    def potential_pie_menu_sub_interactions_gen(cls, target, context, scoring_gsi_handler=None, **kwargs):
        # Intentionally yields nothing; the dead "if False: yield None"
        # keeps this a generator function.
        pass
        if False:
            yield None


# Proxy that lets one Sim join an interaction another Sim is running.
class JoinInteraction(ProxyInteraction):
    create_join_solo_solo = TunableLocalizedStringFactory(default=3134556480, description='Interaction name wrapper for when a solo Sim joins another solo Sim.')
    INSTANCE_SUBCLASSES_ONLY = True

    @classmethod
    def generate(cls, proxied_affordance, join_interaction, joinable_info):
        # Attaches join bookkeeping to the generated proxy subclass
        result = super().generate(proxied_affordance)
        result.join_interaction = join_interaction
        result.joinable_info = joinable_info
        return result

    @classproperty
    def proxy_name(cls):
        return '[Join]'

    @classproperty
    def allow_user_directed(cls):
        return True

    @classmethod
    def _can_rally(cls, *args, **kwargs):
        return False

    @classmethod
    def _test(cls, *args, **kwargs):
        # NOTE(review): passes "args" (the tuple) positionally rather than
        # unpacking "*args" -- looks like a decompiler artifact; verify
        # against the original bytecode before changing.
        return (super()._test)(args, join=True, **kwargs)

    @flexmethod
    def get_name(cls, inst, target=DEFAULT, context=DEFAULT, **kwargs):
        if inst is not None:
            return (super(JoinInteraction, inst).get_name)(target=target, context=context, **kwargs)
        join_target = (cls.get_participant)(participant_type=ParticipantType.JoinTarget, sim=context.sim, target=target, **kwargs)
        original_name = (super(JoinInteraction, cls).get_name)(target=target, context=context, **kwargs)
        localization_args = (original_name, join_target)
        # Prefer a tuning-supplied custom join name when available
        if cls.joinable_info.join_available:
            if cls.joinable_info.join_available.loc_custom_join_name is not None:
                return (cls.joinable_info.join_available.loc_custom_join_name)(*localization_args)
        return (cls.create_join_solo_solo)(*localization_args)

    def run_pre_transition_behavior(self, *args, **kwargs):
        # Bail out if the interaction being joined was already canceled
        if self.join_interaction.has_been_canceled:
            self.cancel((FinishingType.INTERACTION_INCOMPATIBILITY), cancel_reason_msg='The joined interaction has been canceled.')
        return (super().run_pre_transition_behavior)(*args, **kwargs)

    def on_added_to_queue(self, *args, **kwargs):
        (super().on_added_to_queue)(*args, **kwargs)
        # Link the two interactions so canceling one cancels the other
        if self.joinable_info.link_joinable:
            self.join_interaction.add_liability(JOIN_INTERACTION_LIABILITY, JoinInteractionLiability(self))


# Proxy that asks another Sim to join; pushes a JoinInteraction on them.
class AskToJoinInteraction(ProxyInteraction, ImmediateSuperInteraction):
    create_invite_solo_any = TunableLocalizedStringFactory(default=974662056, description='Interaction name wrapper for inviting a solo Sim.')
    INSTANCE_SUBCLASSES_ONLY = True

    @classproperty
    def proxy_name(cls):
        return '[AskToJoin]'

    def __init__(self, *args, **kwargs):
        # Explicitly routes to ImmediateSuperInteraction (skips the proxied
        # affordance's __init__ in the MRO)
        (ImmediateSuperInteraction.__init__)(self, *args, **kwargs)

    def _trigger_interaction_start_event(self):
        pass

    def _trigger_interaction_complete_test_event(self):
        pass

    @classmethod
    def generate(cls, proxied_affordance, join_sim, join_interaction, joinable_info):
        result = super().generate(proxied_affordance)
        result.join_sim = join_sim
        result.join_interaction = join_interaction
        result.joinable_info = joinable_info
        return result

    @classproperty
    def allow_autonomous(cls):
        return False

    @classproperty
    def allow_user_directed(cls):
        return True

    @classmethod
    def test(cls, target, context, **kwargs):
        # Tests the proxied affordance as if the joining Sim were running it
        join_context = context.clone_for_sim(cls.join_sim)
        return (cls.proxied_affordance.test)(target, join_context, join=True, **kwargs)

    @flexmethod
    def _get_name(cls, inst, target=DEFAULT, context=DEFAULT, **kwargs):
        inst_or_cls = inst if inst is not None else cls
        original_name = (super(ProxyInteraction, inst_or_cls)._get_name)(target=target, context=context, **kwargs)
        localization_args = (original_name, inst_or_cls.join_sim)
        if cls.joinable_info.invite_available:
            if cls.joinable_info.invite_available.loc_custom_invite_name is not None:
                return (cls.joinable_info.invite_available.loc_custom_invite_name)(*localization_args)
        return (inst_or_cls.create_invite_solo_any)(*localization_args)

    def _push_join_interaction(self, join_sim):
        # Generates and enqueues the actual JoinInteraction on the other Sim
        join_interaction = JoinInteraction.generate((self.proxied_affordance), join_interaction=(self.join_interaction), joinable_info=(self.joinable_info))
        join_context = InteractionContext(join_sim, (self.context.source), (self.priority), insert_strategy=(QueueInsertStrategy.NEXT))
        (join_sim.push_super_affordance)(join_interaction, (self.target), join_context, **self.interaction_parameters)

    def _do_perform_gen(self, timeline):
        self._push_join_interaction(self.join_sim)
        return True
        if False:
            yield None

    @flexmethod
    def create_localized_string(cls, inst, localized_string_factory, *tokens, **kwargs):
        inst_or_cls = inst if inst is not None else cls
        interaction_tokens = (inst_or_cls.join_sim, inst_or_cls.join_interaction.sim)
        return localized_string_factory(*interaction_tokens + tokens)


# Super interaction that dispatches to one of several tuned sub-affordances,
# picked by priority (distance breaks ties).
class AggregateSuperInteraction(SuperInteraction):
    INSTANCE_TUNABLES = {'aggregated_affordances':TunableList(description='\n A list of affordances composing this aggregate. Distance\n estimation will be used to break ties if there are multiple\n valid interactions at the same priority level.\n ', tunable=TunableTuple(description='\n An affordance and priority entry.\n ', priority=Tunable(description='\n The relative priority of this affordance compared to\n other affordances in this aggregate.\n ', tunable_type=int, default=0), affordance=SuperInteraction.TunableReference(description='\n The aggregated affordance.\n ', pack_safe=True)), tuning_group=GroupNames.GENERAL), 'sim_to_push_affordance_on':TunableEnumEntry(description='\n The Sim to push the affordance on. If this is Actor, the\n affordance will be pushed as a continuation of this.\n ', tunable_type=ParticipantType, default=ParticipantType.Actor, tuning_group=GroupNames.TRIGGERS), 'use_aggregated_affordance_constraints':Tunable(description="\n If enabled, this interaction will pull it's constraints from the\n interaction constraints of the aggregated affordances. The benefit\n is that we are compatible with interactions we intend to run, even\n if they have constraints different from one another. This prevents\n us from having to add a bunch of tests to those affordances and a\n generic constraint here.\n ", tunable_type=bool, default=False, tuning_group=GroupNames.CONSTRAINTS)}
    # Class-level flag mutated during testing; read by allow_user_directed
    _allow_user_directed = True

    def __init__(self, *args, **kwargs):
        (super().__init__)(*args, **kwargs)
        self._valid_aops = None  # cache filled by _get_tested_aops

    @classproperty
    def affordances(cls):
        return (a.affordance.get_interaction_type() for a in cls.aggregated_affordances)

    @classmethod
    def _aops_sorted_gen(cls, target, **interaction_parameters):
        # Yields (priority, aop) pairs sorted by descending priority
        affordances = []
        for aggregated_affordance in cls.aggregated_affordances:
            aop = AffordanceObjectPair((aggregated_affordance.affordance), target, (aggregated_affordance.affordance), None, **interaction_parameters)
            affordances.append((aggregated_affordance.priority, aop))
        return sorted(affordances, key=(operator.itemgetter(0)), reverse=True)

    @flexmethod
    def _get_tested_aops(cls, inst, target, context, **interaction_parameters):
        # Returns (aop, priority) pairs passing their tests; caches on inst
        inst_or_cls = inst if inst is not None else cls
        if inst is not None:
            if inst._valid_aops is not None:
                return inst._valid_aops
        aops_valid = []
        cls._allow_user_directed = False
        for priority, aop in (inst_or_cls._aops_sorted_gen)(target, **interaction_parameters):
            test_result = aop.test(context)
            if test_result:
                if aop.affordance.allow_user_directed:
                    cls._allow_user_directed = True
                aops_valid.append((aop, priority))
        if inst is not None:
            inst._valid_aops = aops_valid
        return aops_valid

    @flexmethod
    def test(cls, inst, target=DEFAULT, context=DEFAULT, super_interaction=None, skip_safe_tests=False, **interaction_parameters):
        inst_or_cls = inst if inst is not None else cls
        result = (super(__class__, inst_or_cls).test)(target=target, context=context, super_interaction=super_interaction, skip_safe_tests=skip_safe_tests, **interaction_parameters)
        if result:
            target = target if target is not DEFAULT else inst.target
            context = context if context is not DEFAULT else inst.context
            # Re-home the context on the Sim who will actually run the aop
            context = context.clone_for_sim(cls.get_participant(participant_type=(cls.sim_to_push_affordance_on), sim=(context.sim), target=target))
            valid_aops = (inst_or_cls._get_tested_aops)(target, context, **interaction_parameters)
            result = TestResult.TRUE if valid_aops else TestResult(False, 'No sub-affordances passed their tests.')
        return result

    @classmethod
    def consumes_object(cls):
        for affordance_tuple in cls.aggregated_affordances:
            if affordance_tuple.affordance.consumes_object():
                return True
        return False

    @classproperty
    def allow_user_directed(cls):
        return cls._allow_user_directed

    @flexmethod
    def _constraint_gen(cls, inst, sim, target, participant_type=ParticipantType.Actor, **kwargs):
        inst_or_cls = cls if inst is None else inst
        yield from (super(SuperInteraction, inst_or_cls)._constraint_gen)(sim, target, participant_type=participant_type, **kwargs)
        if inst_or_cls.use_aggregated_affordance_constraints:
            aggregated_constraints = []
            affordances = []
            if inst is not None:
                if inst._valid_aops is not None:
                    affordances = [aop.super_affordance for aop, _ in inst._valid_aops]
            affordances = affordances if affordances else [affordance_tuple.affordance for affordance_tuple in inst_or_cls.aggregated_affordances]
            if not affordances:
                yield Nowhere
            for aggregated_affordance in affordances:
                intersection = ANYWHERE
                constraint_gen = aggregated_affordance.constraint_gen
                if aggregated_affordance.is_social:
                    constraint_gen = super(SuperInteraction, aggregated_affordance)._constraint_gen
                for constraint in constraint_gen(sim, inst_or_cls.get_constraint_target(target), participant_type=participant_type, **kwargs):
                    intersection = constraint.intersect(intersection)
                # NOTE(review): loop nesting reconstructed from collapsed
                # whitespace -- confirm the "continue" belongs to the outer
                # affordance loop (skip affordances with invalid constraints).
                if not intersection.valid:
                    continue
                aggregated_constraints.append(intersection)
            if aggregated_constraints:
                yield create_constraint_set(aggregated_constraints, debug_name='AggregatedConstraintSet')

    def _do_perform_gen(self, timeline):
        sim = self.get_participant(self.sim_to_push_affordance_on)
        if sim == self.context.sim:
            context = self.context.clone_for_continuation(self)
        else:
            # NOTE(review): "context" is referenced before assignment on this
            # branch (UnboundLocalError) -- almost certainly a decompiler
            # artifact for self.context.clone_for_sim(sim); verify vs. bytecode.
            context = context.clone_for_sim(sim)
        max_priority = None
        aops_valid = []
        self._valid_aops = None
        valid_aops = (self._get_tested_aops)((self.target), context, **self.interaction_parameters)
        # Keep only the aops tied for the highest priority
        for aop, priority in valid_aops:
            if max_priority is not None:
                if priority < max_priority:
                    break
            aops_valid.append(aop)
            max_priority = priority
        if not aops_valid:
            logger.warn('Failed to find valid super affordance in AggregateSuperInteraction: {}, did we not run its test immediately before executing it?', self)
            return ExecuteResult.NONE
        else:
            compatible_interactions = []
            for aop in aops_valid:
                interaction_result = aop.interaction_factory(context)
                if not interaction_result:
                    raise RuntimeError('Failed to generate interaction from aop {}. {} [rmccord]'.format(aop, interaction_result))
                interaction = interaction_result.interaction
                if not self.use_aggregated_affordance_constraints or interactions.si_state.SIState.test_compatibility(interaction, force_concrete=True):
                    compatible_interactions.append(interaction)
            # NOTE(review): this return makes the distance-based selection
            # below unreachable -- decompiler control-flow artifact; the
            # original likely returned only when compatible_interactions was
            # empty. Verify against the bytecode before relying on either path.
            return compatible_interactions or ExecuteResult.NONE
        interactions_by_distance = []
        for interaction in compatible_interactions:
            if len(compatible_interactions) == 1:
                distance = 0
            else:
                distance, _, _ = interaction.estimate_distance()
            if distance is not None:
                interactions_by_distance.append((distance, interaction))
            else:
                # Unknown distance sorts last
                interactions_by_distance.append((sims4.math.MAX_INT32, interaction))
        _, interaction = min(interactions_by_distance, key=(operator.itemgetter(0)))
        return AffordanceObjectPair.execute_interaction(interaction)
        if False:
            yield None


# Mixer interaction that dispatches to one of several tuned sub-affordances.
class AggregateMixerInteraction(MixerInteraction):
    INSTANCE_TUNABLES = {'aggregated_affordances': TunableList(description='\n A list of affordances composing this aggregate. A random one\n will be chosen from sub-action weights if multiple interactions\n pass at the same priority.\n ', tunable=TunableTuple(description='\n An affordance and priority entry.\n ', priority=Tunable(description='\n The relative priority of this affordance compared to\n other affordances in this aggregate.\n ', tunable_type=int, default=0), affordance=MixerInteraction.TunableReference(description='\n The aggregated affordance.\n ', pack_safe=True)), tuning_group=(GroupNames.GENERAL))}
    _allow_user_directed = True

    @classmethod
    def _aops_sorted_gen(cls, target, context, super_interaction=DEFAULT, **interaction_parameters):
        # Yields (priority, aop) pairs sorted by descending priority
        affordances = []
        source_interaction = context.sim.posture.source_interaction if super_interaction == DEFAULT else super_interaction
        for aggregated_affordance in cls.aggregated_affordances:
            aop = AffordanceObjectPair((aggregated_affordance.affordance), target, (source_interaction.affordance), source_interaction, **interaction_parameters)
            affordances.append((aggregated_affordance.priority, aop))
        return sorted(affordances, key=(operator.itemgetter(0)), reverse=True)

    @classmethod
    def _test(cls, target, context, **interaction_parameters):
        result = (super()._test)(target, context, **interaction_parameters)
        if not result:
            return result
        cls._allow_user_directed = False
        context = context.clone_for_sim(sim=(context.sim))
        # Pass if ANY sub-affordance passes; first pass wins
        for _, aop in (cls._aops_sorted_gen)(target, context, **interaction_parameters):
            result = aop.test(context)
            if result:
                if aop.affordance.allow_user_directed:
                    cls._allow_user_directed = True
                return result
        return TestResult(False, 'No sub-affordances passed their tests.')

    @classmethod
    def consumes_object(cls):
        for aggregated_affordance in cls.aggregated_affordances:
            if aggregated_affordance.affordance.consumes_object():
                return True
        return False

    @classproperty
    def allow_user_directed(cls):
        return cls._allow_user_directed

    def _do_perform_gen(self, timeline):
        context = self.context.clone_for_continuation(self)
        max_priority = None
        aops_valid = []
        invalid_aops_with_result = []
        # Collect the aops tied for the highest passing priority
        for priority, aop in (self._aops_sorted_gen)(self.target, context, super_interaction=self.super_interaction, **self.interaction_parameters):
            if max_priority is not None:
                if priority < max_priority:
                    break
            test_result = aop.test(context)
            if test_result:
                aops_valid.append(aop)
                max_priority = priority
            else:
                invalid_aops_with_result.append((aop, test_result))
        if not aops_valid:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 11 13:58:05 2019
Methodology:
0) run code with --prep drdump on all rinex files
1) run code with either --prep edit24hr or --prep edit30hr on all datarecords files
2) run code with --gd2e 30hr or --gd2e 24hr for either solve ppp on 24hr folder or 30hr folder
@author: ziskin
"""


def move_files(path_orig, path_dest, files, out_files=None, verbose=False):
    """Move ``files`` from ``path_orig`` to ``path_dest``.

    Parameters
    ----------
    path_orig : pathlib.Path
        Source directory.
    path_dest : pathlib.Path
        Destination directory.
    files : str or list of str
        File name(s) to move; a bare string is treated as a one-file list.
    out_files : str or list of str, optional
        Destination file name(s); defaults to the source names.
    verbose : bool
        When True, log a warning for every missing source file.

    Notes
    -----
    Zero-size source files are DELETED instead of moved; missing source
    files are skipped (silently unless ``verbose``).
    """
    import shutil
    import logging
    logger = logging.getLogger('gipsyx')
    if isinstance(files, str):
        files = [files]
    if out_files is not None:
        if isinstance(out_files, str):
            out_files = [out_files]
    orig_filenames_paths = [path_orig / x for x in files]
    if out_files is None:
        out_files = files
    dest_filenames_paths = [path_dest / x for x in out_files]
    for file, orig, dest in zip(
            files, orig_filenames_paths, dest_filenames_paths):
        # skip files that do not exist in the source directory:
        if not orig.is_file():
            if verbose:
                logger.warning('{} does not exist in {}'.format(file, orig))
            continue
        # delete files of size 0 instead of moving them:
        if orig.stat().st_size == 0:
            orig.resolve().unlink()
        else:
            shutil.move(orig.resolve(), dest.resolve())
    return


def read_organize_rinex(path, glob_str='*.Z', date_range=None):
    """read and organize the rinex file names for 30 hour run

    Returns a daily-indexed DataFrame with two columns:
      'rinex' - the rinex file name for that day (NaN when missing)
      '30hr'  - 1 when the day AND both neighbouring days have files (so a
                30-hour merge window is possible), 0 when only the day
                itself is usable, NaN when the day has no file at all.
    Raises an Exception when files from more than one station are mixed
    in the folder.
    """
    from aux_gps import get_timedate_and_station_code_from_rinex
    from aux_gps import path_glob
    from aux_gps import slice_task_date_range
    import pandas as pd
    import numpy as np
    import logging
    logger = logging.getLogger('gipsyx')
    dts = []
    rfns = []
    stations = []
    logger.info('reading and organizing rinex files in {}'.format(path))
    files = path_glob(path, glob_str)
    if date_range is not None:
        files = slice_task_date_range(files, date_range, 'read_organize_rinex')
    for file_and_path in files:
        # rinex file names encode station + date in the first 12 chars
        filename = file_and_path.as_posix().split('/')[-1][0:12]
        dt, station = get_timedate_and_station_code_from_rinex(filename)
        stations.append(station)
        dts.append(dt)
        rfns.append(filename)
    # check for more station than one:
    if len(set(stations)) > 1:
        raise Exception('mixed station names in folder {}'.format(path))
    df = pd.DataFrame(data=rfns, index=dts)
    df = df.sort_index()
    df = df[~df.index.duplicated()]
    # reindex over a continuous daily range so gaps become NaN rows:
    full_time = pd.date_range(df.index[0], df.index[-1], freq='1D')
    df = df.reindex(full_time)
    df.columns = ['rinex']
    df.index.name = 'time'
    df['30hr'] = np.nan
    # the first and last days can never have two neighbours:
    df.iat[0, 1] = 0
    df.iat[-1, 1] = 0
    for i in range(1, len(df) - 1):
        nums = np.array([i - 1, i, i + 1])
        nan3days = df.iloc[nums, 0].isnull()
        if not nan3days[0] and not nan3days[1] and not nan3days[2]:
            # previous, current and next day all present -> 30hr window
            df.iat[i, 1] = 1
        elif not nan3days[0] and not nan3days[1] and nan3days[2]:
            # next day missing
            df.iat[i, 1] = 0
        elif nan3days[0] and not nan3days[1] and not nan3days[2]:
            # previous day missing
            df.iat[i, 1] = 0
        elif not nan3days[1] and nan3days[0] and nan3days[2]:
            # both neighbours missing
            df.iat[i, 1] = 0
    return df


def check_path(path):
    """argparse type-checker: return ``path`` as a pathlib.Path, raising
    argparse.ArgumentTypeError when it does not exist on disk."""
    # BUGFIX: argparse was referenced without an import visible at module
    # level in this chunk; import locally so the checker is self-contained.
    import argparse
    import os
    from pathlib import Path
    path = str(path)
    if not os.path.exists(path):
        raise argparse.ArgumentTypeError(path + ' does not exist...')
    return Path(path)


def check_file_in_cwd(filename):
    """argparse type-checker: return the full path of ``filename`` inside
    the current working directory, raising argparse.ArgumentTypeError when
    the file is not there."""
    # BUGFIX: same as check_path - make the argparse dependency explicit.
    import argparse
    from pathlib import Path
    cwd = Path().cwd()
    file_and_path = cwd / filename
    if not file_and_path.is_file():
        raise argparse.ArgumentTypeError(
            '{} does not exist at {}'.format(
                filename, cwd))
    return file_and_path


def prepare_gipsyx_for_run_one_station(rinexpath, staDb, prep, rewrite,
                                       date_range=None):
    """rinex editing and merging command-line utility, 3 values for prep:
    0) drdump: run dataRecordDump on all rinex files in rinexpath
    1) edit24hr: run rnxEditGde.py with staDb on all datarecords files in
       rinexpath / dr, save them to rinexpath / 24hr
    2) edit30hr: run drMerge.py on 3 consecutive datarecords files in
       rinexpath / dr (rnxEditGde.py with staDb on lonely datarecords files)
       and then rnxEditGde.py with staDb on merged datarecords files,
       save them to rinexpath / 30hr
    rewrite: overwrite all files - supported with all modes of prep"""
    import subprocess
    from subprocess import CalledProcessError
    from subprocess import TimeoutExpired
    from aux_gps import slice_task_date_range
    from aux_gps import path_glob
    import logging
    import pandas as pd
    # cnt/tot are shared, module-global progress counters read by the
    # nested worker functions below.
    global cnt
    global tot
    from pathlib import Path

    def run_dataRecorDump_on_all_files(rinexpath, out_path, rewrite,
                                       date_range=None):
        """runs dataRecordDump on all files in rinexpath (where all and only
        rinex files exist), saves the datarecord files to out_path. rewrite
        is a flag that overwrites the files in out_path even if they already
        exist there."""
        logger.info('running dataRecordDump...')
        est_time_per_single_run = 1.0  # seconds
        out_path.mkdir(parents=True, exist_ok=True)
        files = path_glob(rinexpath, '*.Z')
        files_already_done = path_glob(out_path, '*.dr.gz', True)
        if date_range is not None:
            files = slice_task_date_range(files, date_range, 'drdump')
            files_already_done = slice_task_date_range(
                files_already_done, date_range, 'already done drdump')
        tot = len(files)
        logger.info('found {} rinex Z files in {} to run.'.format(tot, rinexpath))
        tot_final = len(files_already_done)
        logger.info('found {} data records dr.gz files in {}'.format(tot_final, out_path))
        tot_to_run = tot - tot_final
        # rough ETA so the operator knows when to check back:
        dtt = pd.to_timedelta(est_time_per_single_run, unit='s') * tot_to_run
        logger.info('estimated time to completion of run: {}'.format(dtt))
        logger.info('check again in {}'.format(pd.Timestamp.now() + dtt))
        for file_and_path in files:
            filename = file_and_path.as_posix().split('/')[-1][0:12]
            dr_file = out_path / '{}.dr.gz'.format(filename)
            if not rewrite:
                if (dr_file).is_file():
                    logger.warning(
                        '{} already exists in {}, skipping...'.format(
                            filename + '.dr.gz', out_path))
                    cnt['succ'] += 1
                    continue
            logger.info('processing {} ({}/{})'.format(
                filename, cnt['succ'] + cnt['failed'], tot))
            files_to_move = [filename + x for x in ['.log', '.err']]
            command = 'dataRecordDump -rnx {} -drFileNmOut {} > {}.log 2>{}.err'.format(
                file_and_path.as_posix(), dr_file.as_posix(), filename, filename)
            try:
                subprocess.run(command, shell=True, check=True)
                cnt['succ'] += 1
            except CalledProcessError:
                logger.error('dataRecordDump failed on {}...'.format(filename))
                cnt['failed'] += 1
            except TimeoutExpired:
                # BUGFIX: this handler referenced the undefined name `rfn`
                # (NameError); `filename` is the correct identifier here.
                # NOTE(review): subprocess.run() above is called without a
                # timeout argument, so this branch is currently unreachable -
                # confirm whether a timeout= was intended.
                logger.error('dataRecordDump timed out on {}, copying log files.'.format(filename))
                cnt['failed'] += 1
                with open(Path().cwd() / files_to_move[1], 'a') as f:
                    f.write('dataRecordDump has Timed out !')
                move_files(Path().cwd(), out_path, files_to_move)
        return

    def run_rnxEditGde(filename, in_path, out_path, rewrite, suffix=24):
        """runs rnxEditGde on filename that exists in in_path and writes the
        edited file (with suffix) to out_path. it first checks whether
        filename exists in out_path and if it is, it skips this filename.
        rewrite flag overwrites the filename regardless."""
        rfn = filename[0:12]
        station = rfn[0:4].upper()
        dr_edited_file = out_path / '{}_edited{}hr.dr.gz'.format(rfn, suffix)
        file_and_path = in_path / filename
        if not rewrite:
            if (dr_edited_file).is_file():
                logger.warning(
                    '{} already exists in {}, skipping...'.format(
                        filename, out_path))
                cnt['succ'] += 1
                return
        # NOTE(review): `date` is not a parameter - it is the caller's
        # rinex_df.iterrows() loop variable reached through the closure;
        # fragile, consider passing it explicitly.
        logger.info(
            'processing {} ({}, {}/{})'.format(
                filename, date.strftime('%Y-%m-%d'),
                cnt['succ'] + cnt['failed'], tot))
        files_to_move = [rfn + x for x in ['.log', '.err']]
        command = 'rnxEditGde.py -type datarecord -recNm {} -data {} -out {} -staDb {} > {}.log 2>{}.err'.format(
            station, file_and_path.as_posix(), dr_edited_file.as_posix(),
            staDb.as_posix(), rfn, rfn)
        try:
            subprocess.run(command, shell=True, check=True)
            cnt['succ'] += 1
        except CalledProcessError:
            cnt['failed'] += 1
            logger.error('rnxEditGde.py failed on {}...'.format(filename))
        except TimeoutExpired:
            # NOTE(review): unreachable while subprocess.run() above has no
            # timeout argument - confirm intent.
            logger.error('rnxEditGde.py timed out on {}, copying log files.'.format(rfn))
            cnt['failed'] += 1
            with open(Path().cwd() / files_to_move[1], 'a') as f:
                f.write('rnxEditGde.py has Timed out !')
            move_files(Path().cwd(), out_path, files_to_move)
        return

    def run_drMerge(filenames, in_path, duration):
        """Merge three consecutive daily datarecord files into one 30-hour
        window (21:00 of the first day to 03:00 of the third); returns the
        merged file name, or None on timeout."""
        from aux_gps import get_timedate_and_station_code_from_rinex
        rfns = [x[0:12] for x in filenames]
        dts = [get_timedate_and_station_code_from_rinex(x, True) for x in rfns]
        if duration == '30hr':
            start = dts[0].strftime('%Y-%m-%d') + ' 21:00:00'
            end = dts[2].strftime('%Y-%m-%d') + ' 03:00:00'
        dr_merged_file = Path().cwd() / '{}_merged.dr.gz'.format(rfns[1])
        logger.info('merging {}, {} and {} to {}'.format(*rfns, rfns[1] + '_merged.dr.gz'))
        f_and_paths = [in_path / x for x in filenames]
        # NOTE(review): `rfn` is not defined in this function; it resolves
        # through the enclosing scope (assigned in the caller's loop) -
        # probably rfns[1] was intended. Left unchanged because the edit30hr
        # call site is outside this chunk - confirm before fixing.
        files_to_move = [rfn + x for x in ['_drmerge.log', '_drmerge.err']]
        command = 'drMerge.py -inFiles {} {} {} -outFile {} -start {} -end {} > {}.log 2>{}.err'.format(
            f_and_paths[0].as_posix(), f_and_paths[1].as_posix(),
            f_and_paths[2].as_posix(), dr_merged_file.as_posix(), start, end,
            rfn + '_drmerge', rfn + '_drmerge')
        try:
            subprocess.run(command, shell=True, check=True, timeout=60)
        except CalledProcessError:
            logger.error('drMerge.py failed on {}...'.format(filenames))
        except TimeoutExpired:
            logger.error('drMerge.py timed out on {}, copying log files.'.format(filenames))
            cnt['failed'] += 1
            with open(Path().cwd() / files_to_move[1], 'a') as f:
                f.write('drMerge.py run has Timed out !')
            return None
        move_files(Path().cwd(), Path().cwd(), files_to_move)
        return rfns[1] + '_merged.dr.gz'

    logger = logging.getLogger('gipsyx')
    logger.info(
        'starting preparation utility utility for gipsyX run.')
    logger.info('working with {}'.format(staDb))
    if rewrite:
        logger.warning('overwrite files mode initiated.')
    rinex_df = read_organize_rinex(rinexpath, date_range=date_range)
    # shared success/failure counters, mutated by the nested workers:
    cnt = {'succ': 0, 'failed': 0}
    dr_path = rinexpath / 'dr'
    if prep == 'drdump':
        # NOTE(review): date_range is hard-coded to None here instead of
        # forwarding the function argument - confirm whether intentional.
        run_dataRecorDump_on_all_files(rinexpath, dr_path, rewrite,
                                       date_range=None)
    elif prep == 'edit24hr':
        logger.info('running rnxEditGde.py with 24hr setting for all files.')
        hr24 = rinexpath / '24hr'
        try:
            hr24.mkdir()
        except FileExistsError:
            logger.info('{} already exists, using that folder.'.format(hr24))
        for date, row in rinex_df.iterrows():
            rfn = row['rinex']
            if pd.isna(rfn):
                continue
            filename = rfn + '.dr.gz'
            run_rnxEditGde(filename, dr_path, hr24, rewrite)
    elif prep == 'edit30hr':
        logger.info(
            'running drMerge.py/rnxEditGde.py with 30hr setting for all files(when available).')
        hr30 = rinexpath / '30hr'
        est_time_per_single_run = 4.0  # seconds
        tot = rinex_df['30hr'].value_counts().sum()
        logger.info('found {} data records dr.gz files in {} to run.'.format(tot, dr_path))
        files_already_done = path_glob(hr30, '*.dr.gz', True)
        if date_range is not None:
            files_already_done
            # NOTE(review): SOURCE chunk is truncated here mid-statement;
            # the remainder of the edit30hr branch is not visible.
            # NOTE(review): chunk begins mid-way through Selector.parse_many();
            # the orphaned words below belonged to a comment in that method:
            # ..." correctly. anywhere.
            # TODO this used to turn "1.5%" into empty string; why does error
            # not work?
            m = SELECTOR_TOKENIZER.match(selector, pos)
            if not m:
                # TODO prettify me
                raise SyntaxError("Couldn't parse selector: %r" % (selector,))
            token = m.group(0)
            pos += len(token)

            # Kill any extraneous space, BUT make sure not to turn a lone space
            # into an empty string
            token = token.strip() or ' '

            if token == ',':
                # End current selector
                promote_selector()
            elif token in ' +>~':
                # End current simple selector
                promote_simple()
                pending['combinator'] = token
            else:
                # Add to pending simple selector
                pending['tokens'].append(token)

        # Deal with any remaining pending bits
        promote_selector()

        return ret

    @classmethod
    def parse_one(cls, selector_string):
        # Parse a string that must contain EXACTLY one selector; a comma
        # separated list (or an empty string) raises ValueError.
        selectors = cls.parse_many(selector_string)
        if len(selectors) != 1:
            # TODO better error
            raise ValueError
        return selectors[0]

    def __repr__(self):
        return "<%s: %r>" % (type(self).__name__, self.render())

    def __hash__(self):
        # Hash on the simple-selector tuple so selectors that compare equal
        # (see __eq__) also hash equal.
        return hash(self.simple_selectors)

    def __eq__(self, other):
        if not isinstance(other, Selector):
            return NotImplemented
        return self.simple_selectors == other.simple_selectors

    @property
    def has_parent_reference(self):
        # True when any compound part carries a parent reference (&).
        for simple in self.simple_selectors:
            if simple.has_parent_reference:
                return True
        return False

    @property
    def has_placeholder(self):
        # True when any compound part carries a placeholder selector.
        for simple in self.simple_selectors:
            if simple.has_placeholder:
                return True
        return False

    def with_parent(self, parent):
        # Resolve parent references against `parent`; when there is no
        # explicit reference, prepend the parent's parts (normal nesting).
        saw_parent_ref = False

        new_simples = []
        for simple in self.simple_selectors:
            if simple.has_parent_reference:
                new_simples.extend(simple.replace_parent(parent.simple_selectors))
                saw_parent_ref = True
            else:
                new_simples.append(simple)

        if not saw_parent_ref:
            new_simples = parent.simple_selectors + tuple(new_simples)

        return type(self)(new_simples)

    def lookup_key(self):
        """Build a key from the "important" parts of a selector: elements,
        classes, ids.
        """
        parts = set()
        for node in self.simple_selectors:
            for token in node.tokens:
                # skip pseudo-classes (':') and attribute selectors ('['):
                if token[0] not in ':[':
                    parts.add(token)

        if not parts:
            # Should always have at least ONE key; selectors with no elements,
            # no classes, and no ids can be indexed as None to avoid a scan of
            # every selector in the entire document
            parts.add(None)

        return frozenset(parts)

    def is_superset_of(self, other):
        # True when each of `other`'s compound parts is matched, in order,
        # by some part of this selector.
        # NOTE(review): if the inner while-loop exhausts without a match for
        # the LAST other_node, the method still returns True - confirm
        # whether that is intended.
        assert isinstance(other, Selector)

        idx = 0
        for other_node in other.simple_selectors:
            if idx >= len(self.simple_selectors):
                return False

            while idx < len(self.simple_selectors):
                node = self.simple_selectors[idx]
                idx += 1
                if node.is_superset_of(other_node):
                    break

        return True

    def substitute(self, target, replacement):
        """Return a list of selectors obtained by replacing the `target`
        selector with `replacement`.

        Herein lie the guts of the Sass @extend directive.

        In general, for a selector ``a X b Y c``, a target ``X Y``, and a
        replacement ``q Z``, return the selectors ``a q X b Z c`` and
        ``q a X b Z c``.  Note in particular that no more than two selectors
        will be returned, and the permutation of ancestors will never insert
        new simple selectors "inside" the target selector.
        """
        # Find the target in the parent selector, and split it into
        # before/after
        p_before, p_extras, p_after = self.break_around(target.simple_selectors)

        # The replacement has no hinge; it only has the most specific simple
        # selector (which is the part that replaces "self" in the parent) and
        # whatever preceding simple selectors there may be
        r_trail = replacement.simple_selectors[:-1]
        r_extras = replacement.simple_selectors[-1]

        # TODO what if the prefix doesn't match?  who wins?  should we even get
        # this far?
        focal_nodes = (p_extras.merge_into(r_extras),)

        befores = _merge_selectors(p_before, r_trail)

        cls = type(self)
        return [
            cls(before + focal_nodes + p_after)
            for before in befores]

    def break_around(self, hinge):
        """Given a simple selector node contained within this one (a "hinge"),
        break it in half and return a parent selector, extra specifiers for
        the hinge, and a child selector.

        That is, given a hinge X, break the selector A + X.y B into A, + .y,
        and B.
        """
        hinge_start = hinge[0]
        for i, node in enumerate(self.simple_selectors):
            # In this particular case, a ' ' combinator actually means "no" (or
            # any) combinator, so it should be ignored
            if hinge_start.is_superset_of(node, soft_combinator=True):
                start_idx = i
                break
        else:
            raise ValueError(
                "Couldn't find hinge %r in compound selector %r"
                % (hinge_start, self.simple_selectors))

        for i, hinge_node in enumerate(hinge):
            if i == 0:
                # We just did this
                continue

            self_node = self.simple_selectors[start_idx + i]
            if hinge_node.is_superset_of(self_node):
                continue

            # TODO this isn't true; consider finding `a b` in `a c a b`
            raise ValueError(
                "Couldn't find hinge %r in compound selector %r"
                % (hinge_node, self.simple_selectors))

        end_idx = start_idx + len(hinge) - 1
        focal_node = self.simple_selectors[end_idx]
        extras = focal_node.difference(hinge[-1])
        return (
            self.simple_selectors[:start_idx],
            extras,
            self.simple_selectors[end_idx + 1:])

    def render(self):
        # Serialize back to CSS source text.
        return ' '.join(simple.render() for simple in self.simple_selectors)


def _merge_selectors(left, right):
    """Given two selector chains (lists of simple selectors), return a list of
    selector chains representing elements matched by both of them.

    This operation is not exact, and involves some degree of fudging -- the
    wackier and more divergent the input, the more fudging.  It's meant to be
    what a human might expect rather than a precise covering of all possible
    cases.

    Most notably, when the two input chains have absolutely nothing in common,
    the output is merely ``left + right`` and ``right + left`` rather than all
    possible interleavings.
    """
    if not left or not right:
        # At least one is empty, so there are no conflicts; just return
        # whichever isn't empty.  Remember to return a LIST, though
        return [left or right]

    # Diff the two chains on "compatible" (mergeable) selectors, then weave
    # the conflicting stretches between each common point.
    lcs = longest_common_subsequence(left, right, _merge_simple_selectors)

    ret = [()]  # start with a dummy empty chain or weaving won't work
    left_last = 0
    right_last = 0
    for left_next, right_next, merged in lcs:
        ret = _weave_conflicting_selectors(
            ret,
            left[left_last:left_next],
            right[right_last:right_next],
            (merged,))
        left_last = left_next + 1
        right_last = right_next + 1

    # weave whatever trails after the last common point:
    ret = _weave_conflicting_selectors(
        ret,
        left[left_last:],
        right[right_last:])
    return ret


def _weave_conflicting_selectors(prefixes, a, b, suffix=()):
    """Part of the selector merge algorithm above.  Not useful on its own.
    Pay no attention to the man behind the curtain.
    """
    # OK, what this actually does: given a list of selector chains, two
    # "conflicting" selector chains, and an optional suffix, return a new list
    # of chains like this:
    #     prefix[0] + a + b + suffix,
    #     prefix[0] + b + a + suffix,
    #     prefix[1] + a + b + suffix,
    #     ...
    # In other words, this just appends a new chain to each of a list of given
    # chains, except that the new chain might be the superposition of two
    # other incompatible chains.
    both = a and b
    for prefix in prefixes:
        yield prefix + a + b + suffix
        if both:
            # Only use both orderings if there's an actual conflict!
            yield prefix + b + a + suffix


def _merge_simple_selectors(a, b):
    """Merge two simple selectors, for the purposes of the LCS algorithm
    below.

    In practice this returns the more specific selector if one is a subset of
    the other, else it returns None.
    """
    # TODO what about combinators
    if a.is_superset_of(b):
        return b
    elif b.is_superset_of(a):
        return a
    else:
        return None


def longest_common_subsequence(a, b, mergefunc=None):
    """Find the longest common subsequence between two iterables.

    The longest common subsequence is the core of any diff algorithm: it's the
    longest sequence of elements that appears in both parent sequences in the
    same order, but NOT necessarily consecutively.

    Original algorithm borrowed from Wikipedia:
    http://en.wikipedia.org/wiki/Longest_common_subsequence_problem#Code_for_the_dynamic_programming_solution

    This function is used only to implement @extend, largely because that's
    what the Ruby implementation does.  Thus it's been extended slightly from
    the simple diff-friendly algorithm given above.

    What @extend wants to know is whether two simple selectors are compatible,
    not just equal.  To that end, you must pass in a "merge" function to
    compare a pair of elements manually.  It should return `None` if they are
    incompatible, and a MERGED element if they are compatible -- in the case
    of selectors, this is whichever one is more specific.

    Because of this fuzzier notion of equality, the return value is a list of
    ``(a_index, b_index, value)`` tuples rather than items alone.
    """
    if mergefunc is None:
        # Stupid default, just in case
        def mergefunc(a, b):
            if a == b:
                return a
            return None

    # Precalculate equality, since it can be a tad expensive and every pair is
    # compared at least once
    eq = {}
    for ai, aval in enumerate(a):
        for bi,
<filename>reports_generator.py
# -*- coding: utf-8 -*-
import MySQLdb
import pandas as pd
import configparser
import xlsxwriter
import datetime
import os
import argparse
from collections import defaultdict

from working_time import compute_working_time

config = configparser.ConfigParser()
config.read('settings.ini')

# NOTE(review): non-raw Windows path with backslashes; '\О' and '\C' are not
# recognized escapes so the value survives, but a raw string would be safer.
BASE_DIR = 'Z:\Отчеты OTRS\CallCenter'
report_dates = configparser.ConfigParser()
report_dates.read(os.path.join(BASE_DIR, 'report_dates.ini'))
# snapshot of "now" taken once at import time:
CURRENT_DATE = datetime.datetime.now().strftime('%Y-%m-%d')
CURRENT_DATE_TIME = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
TOMORROW = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
YESTERDAY = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')

# Maps a (start_date, end_date) window -> allowed working days to close a
# ticket created inside that window (see get_max_working_days()).
# NOTE(review): the ('2019-07-29', '2019-07-16') entry has start > end, so it
# can never match; it also overlaps the first range. It probably should read
# ('2019-07-16', '2019-07-29'): 5 — confirm with the report owner.
MAX_WORKING_DAYS_DICT = {
    ('2019-04-11', '2019-07-29'): 10,
    ('2019-07-29', '2019-07-16'): 5,
    ('2019-07-29', TOMORROW): 3,
}

# NOTE(review): module-level side effect — the DB connection is opened at
# import time and shared by every Report instance; it is never closed here.
db = MySQLdb.connect(config['CONNECTION']['HOST'], config['CONNECTION']['USER'],
                     config['CONNECTION']['PASSWORD'], config['CONNECTION']['DATABASE'],
                     charset='utf8', init_command='SET NAMES UTF8')


class RecordTypes:
    """Catalogue of OTRS ticket queues.

    Each attribute maps a numeric queue code to its Russian display name;
    TOTAL and DATA are pseudo-entries (code None) used for report layout.
    """
    VOLUNTEERS = {'code': 11, 'name': 'Вызов волонтеров на подключение оборудования'}
    OTHER = {'code': 14, 'name': 'Иное'}
    PURCHASE = {'code': 9, 'name': 'Выбор и покупка приемного оборудования (телевизор, приставка, антенна)'}
    SOCIAL = {'code': 8, 'name': 'Социальная поддержка льготных категорий граждан'}
    COMPLAINTS = {'code': 15, 'name': 'Жалобы'}
    BROADCASTING_OUTSIDE = {'code': 10, 'name': 'Вещание на территориях вне зоны цифрового сигнала'}
    CONNECTION = {'code': 12, 'name': 'Подключение к системе коллективного приема телевидения (СКПТ)'}
    BROADCASTING_REGIONAL = {'code': 13, 'name': 'Вещание региональных каналов'}
    TOTAL = {'code': None, 'name': 'Итого'}
    DATA = {'code': None, 'name': 'Дата'}

    @staticmethod
    def get_record_types():
        # All public upper-case attributes (includes TOTAL and DATA).
        return [k for k in RecordTypes.__dict__.keys() if not k.startswith('_') and k.isupper()]

    @staticmethod
    def get_queues():
        # Only real queues, i.e. entries whose code is not None.
        record_types = RecordTypes.get_record_types()
        return [k for k in record_types if getattr(RecordTypes, k)['code']]

    @staticmethod
    def get_record_queue_by_code(code):
        # Reverse lookup: queue dict by numeric code, or None if unknown.
        queues = RecordTypes.get_queues()
        for k in queues:
            if getattr(RecordTypes, k)['code'] == code:
                return getattr(RecordTypes, k)
        return None


def get_max_working_days(date):
    """Return the allowed number of working days for a ticket created at
    ``date`` (a '%Y-%m-%d %H:%M:%S' string), per MAX_WORKING_DAYS_DICT.

    Raises ValueError when no configured window matches.
    """
    for key in MAX_WORKING_DAYS_DICT.keys():
        # NOTE(review): strict '<' excludes tickets created exactly at a
        # window boundary (midnight) — confirm this is intended.
        if datetime.datetime.strptime(key[0], '%Y-%m-%d') < datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S') < \
                datetime.datetime.strptime(key[1], '%Y-%m-%d'):
            return MAX_WORKING_DAYS_DICT[key]
    raise ValueError("Incorrect date")


class Report:
    """Base class for the OTRS Excel report generators.

    Subclasses set form_name (section in report_dates.ini) and
    form_verbose_name (output folder / file prefix), fetch rows with
    get_data_from_db(), shape them in data_to_form_template() and write
    the result with one of the form_to_excel_* helpers.
    """

    def __init__(self, **kwargs):
        self.cursor = db.cursor(MySQLdb.cursors.DictCursor)
        self.data = None                # raw rows fetched from the DB
        self.form = None                # shaped data ready for export
        self.form_name = None           # section name in report_dates.ini
        self.form_verbose_name = None   # human-readable name / output folder
        self.header = None              # column headers for the Excel sheet
        self.start_date = None
        self.end_date = None
        self.daily = False
        self.result_folder_path = BASE_DIR
        if 'daily' in kwargs:
            if kwargs['daily']:
                self.daily = kwargs['daily']
        if 'path' in kwargs:
            if kwargs['path']:
                self.result_folder_path = kwargs['path']

    def get_data_from_db(self, filename, *args):
        # Read the SQL template from `filename`, optionally .format() it with
        # *args (usually the date range), and fetch every row as a dict.
        sql_form = open(filename).read()
        if args:
            self.cursor.execute(sql_form.format(*args))
        else:
            self.cursor.execute(sql_form)
        self.data = self.cursor.fetchall()

    def init_dates(self):
        # Daily reports cover the current day; otherwise the range comes from
        # report_dates.ini (END_DATE defaults to tomorrow when absent).
        if self.daily:
            self.start_date = CURRENT_DATE
        elif 'START_DATE' in report_dates[self.form_name]:
            self.start_date = report_dates[self.form_name]['START_DATE']
        if 'END_DATE' not in report_dates[self.form_name]:
            self.end_date = TOMORROW
        else:
            self.end_date = report_dates[self.form_name]['END_DATE']

    def data_to_form_template(self):
        # Implemented by subclasses.
        pass

    def form_to_file(self):
        # Implemented by subclasses.
        pass

    def form_to_excel_by_territory(self, column_range, header_merge_range):
        """Write one worksheet per territory (key of self.form) into a dated
        workbook under result_folder_path/form_verbose_name."""
        if not self.form:
            return
        file_name = self.form_verbose_name + datetime.date.today().strftime("_%d_%m_%Y") + '.xlsx'
        folder_path = os.path.join(self.result_folder_path, self.form_verbose_name)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        workbook = xlsxwriter.Workbook(os.path.join(folder_path, file_name))
        for record in self.form.items():
            worksheet = workbook.add_worksheet(name=record[0])
            worksheet.set_column(column_range, 20)
            worksheet.set_row(0, 30)
            worksheet.set_row(1, 80)
            header_format = self.get_header_format(workbook)
            row_format = self.get_row_format(workbook)
            # row 0: merged territory title, row 1: column headers
            worksheet.merge_range(header_merge_range, record[0], header_format)
            for idx, key in enumerate(self.header):
                worksheet.write(1, idx, key, header_format)
            for row_idx, data in enumerate(record[1], start=2):
                for col_idx, (key, value) in enumerate(data.items()):
                    worksheet.write(row_idx, col_idx, value, row_format)
        workbook.close()

    def form_to_excel_aggregated(self, column_range):
        """Write all territories into a single worksheet; the first column
        repeats the territory name on each of its rows."""
        if not self.form:
            return
        file_name = self.form_verbose_name + datetime.date.today().strftime("_%d_%m_%Y") + '.xlsx'
        folder_path = os.path.join(self.result_folder_path, self.form_verbose_name)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        workbook = xlsxwriter.Workbook(os.path.join(folder_path, file_name))
        worksheet = workbook.add_worksheet()
        worksheet.set_column('A:B', 40)
        worksheet.set_column(column_range, 20)
        worksheet.set_row(0, 80)
        header_format = self.get_header_format(workbook)
        row_format = self.get_row_format(workbook)
        for idx, key in enumerate(self.header):
            worksheet.write(0, idx, key, header_format)
        row_idx = 1
        for record in self.form.items():
            for data in record[1]:
                worksheet.write(row_idx, 0, record[0], row_format)
                for col_idx, (key, value) in enumerate(data.items(), start=1):
                    worksheet.write(row_idx, col_idx, value, row_format)
                row_idx += 1
        workbook.close()

    @staticmethod
    def get_header_format(workbook):
        # bold, wrapped, centered cells for header rows
        header_format = workbook.add_format()
        header_format.set_bold()
        header_format.set_text_wrap()
        header_format.set_align('center')
        header_format.set_align('vcenter')
        return header_format

    @staticmethod
    def get_row_format(workbook):
        # wrapped, top-left aligned cells for data rows
        row_format = workbook.add_format()
        row_format.set_text_wrap()
        row_format.set_align('left')
        row_format.set_align('top')
        return row_format


class RecordForm542:
    """Flat row container for Форма 5.4.2; the attribute order defines the
    Excel column order (rows are exported through ``__dict__``)."""
    def __init__(self):
        self.name = ''
        self.locality = ''
        self.address = ''
        self.phone_number = ''
        self.create_time = ''
        self.closed = ''
        self.complaint = 0
        self.ticket_number = ''
        self.volunteers = ''


class RecordBadGuysForm:
    """Flat row container for the reopened-tickets report."""
    def __init__(self):
        self.ticket_number = ''
        self.theme = ''
        self.create_time = ''
        self.reopened_dates = ''


class ReportForm01(Report):
    """Ticket counts per municipality and per queue (form_01.sql), exported
    as a single Excel sheet with row and column totals."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.form_name = 'REPORT_FORM_01'
        self.init_dates()
        self.form_verbose_name = 'Форма_1'
        # output column order:
        self.records = [RecordTypes.PURCHASE, RecordTypes.SOCIAL, RecordTypes.BROADCASTING_OUTSIDE,
                        RecordTypes.VOLUNTEERS, RecordTypes.CONNECTION, RecordTypes.BROADCASTING_REGIONAL,
                        RecordTypes.OTHER, RecordTypes.TOTAL, RecordTypes.COMPLAINTS]

    def get_data_from_db(self, filename='form_01.sql', *args):
        super(ReportForm01, self).get_data_from_db(filename, self.start_date, self.end_date)

    def data_to_form_template(self):
        # pivot: one row per municipality (value_text), one column per queue
        _values = list(set(map(lambda x: x['name'], self.records)))
        _index = list(set(map(lambda x: x['value_text'], self.data)))
        df = pd.DataFrame(0, index=_index, columns=_values)
        for row in self.data:
            if row['complaints'] is not None:
                df.at[row['value_text'], RecordTypes.COMPLAINTS['name']] = row['complaints']
            record_type = RecordTypes.get_record_queue_by_code(row['ticket_type_id'])
            df.at[row['value_text'], record_type['name']] = row['frequency']
        df.at[RecordTypes.TOTAL['name']] = 0
        # NOTE(review): df.T produces a transposed *copy*, so this .at
        # assignment is discarded; the TOTAL column is recomputed in the
        # loop below anyway — confirm and consider removing this line.
        df.T.at[RecordTypes.TOTAL['name']] = 0
        # column totals (per queue):
        for key in df.keys():
            df.at[RecordTypes.TOTAL['name'], key] = sum(df[key])
        # row totals (per municipality), complaints excluded from the total:
        total_df = df.drop(RecordTypes.COMPLAINTS['name'], axis=1)
        for row in total_df.T.keys():
            df.at[row, RecordTypes.TOTAL['name']] = sum(total_df.T[row])
        self.form = df

    def form_to_file(self):
        self.form_to_excel()

    def form_to_csv(self):
        pass

    def form_to_excel(self):
        # lazily build the form, then write a single dated worksheet
        if self.form is None:
            self.data_to_form_template()
        file_name = self.form_verbose_name + datetime.date.today().strftime("_%d_%m_%Y") + '.xlsx'
        folder_path = os.path.join(self.result_folder_path, self.form_verbose_name)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        workbook = xlsxwriter.Workbook(os.path.join(folder_path, file_name))
        worksheet = workbook.add_worksheet()
        worksheet.set_column('A:H', 20)
        worksheet.set_row(0, 80)
        header_format = self.get_header_format(workbook)
        row_format = self.get_row_format(workbook)
        worksheet.write(0, 0, 'Наименование ОМСУ', header_format)
        for idx, record in enumerate(self.records, start=1):
            worksheet.write(0, idx, record['name'], header_format)
        # pre-fill the whole grid with zeros:
        for row in range(1, len(self.form) + 1):
            for col in range(1, len(self.records) + 1):
                worksheet.write(row, col, 0, row_format)
        for idx_row, (row_name, row_data) in enumerate(self.form.iterrows(), start=1):
            worksheet.write(idx_row, 0, row_name)
            # map each DataFrame column onto its position in self.records:
            for col_name, col_value in row_data.items():
                for idx_theme, record in enumerate(self.records, start=1):
                    if record['name'] == col_name:
                        worksheet.write(idx_row, idx_theme, col_value, row_format)
                        break
        workbook.close()


class ReportForm542(Report):
    """ Tickets that was closed for ten days

    i.e. closed tickets whose handling time stayed within the allowed
    number of working days (see get_max_working_days), grouped by
    municipality (dynamic field_id == 14).
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.form_name = 'REPORT_FORM_54_2'
        self.init_dates()
        self.form_verbose_name = 'Форма_5_4_2'
        self.header = [
            'ФИО',
            'Населенный пункт',
            'Точный адрес',
            'Телефонный номер',
            'Дата открытия',
            'Дата закрытия',
            'Жалоба',
            'Номер заявки',
            'Волонтёры',
        ]
        self.municipalities = 'Муниципальные образования'

    def get_data_from_db(self, filename='form_54_2.sql', *args):
        super(ReportForm542, self).get_data_from_db(filename, self.start_date, self.end_date)

    def data_to_form_template(self):
        df = pd.DataFrame.from_records(self.data)
        if df.empty:
            return
        # only tickets that have actually been closed:
        ticket_ids = list(set(df[df['closed'].notnull()]['ticket_id']))
        data = defaultdict(list)
        for _id in ticket_ids:
            ticket_df = df[df['ticket_id'] == _id]
            create_time = ticket_df[ticket_df['field_id'] == 14]['create_time'].astype(str).iloc[0]
            max_working_days = get_max_working_days(create_time)
            closed = ticket_df[ticket_df['field_id'] == 14]['closed'].astype(str).iloc[0]
            # skip tickets that exceeded the allowed working time (hours):
            if compute_working_time(create_time, closed) > int(max_working_days) * 24:
                continue
            record = RecordForm542()
            # dynamic-field ids: 12=name, 15=locality, 17=address, 16=phone,
            # 37=complaint flag, 39/40=volunteers, 14=municipality
            if not ticket_df[ticket_df['field_id'] == 12]['value_text'].empty:
                record.name = ticket_df[ticket_df['field_id'] == 12]['value_text'].iloc[0]
            if not ticket_df[ticket_df['field_id'] == 15]['value_text'].empty:
                record.locality = ticket_df[ticket_df['field_id'] == 15]['value_text'].iloc[0]
            if not ticket_df[ticket_df['field_id'] == 17]['value_text'].empty:
                record.address = ticket_df[ticket_df['field_id'] == 17]['value_text'].iloc[0]
            if not ticket_df[ticket_df['field_id'] == 16]['value_text'].empty:
                record.phone_number = ticket_df[ticket_df['field_id'] == 16]['value_text'].iloc[0]
            if not ticket_df[ticket_df['field_id'] == 37]['value_int'].empty:
                record.complaint = ticket_df[ticket_df['field_id'] == 37]['value_int'].iloc[0]
            if not ticket_df[ticket_df['field_id'] == 39]['value_text'].empty:
                record.volunteers = ticket_df[ticket_df['field_id'] == 39]['value_text'].iloc[0]
            if not ticket_df[ticket_df['field_id'] == 40]['value_text'].empty:
                record.volunteers += '; ' + ticket_df[ticket_df['field_id'] == 40]['value_text'].iloc[0]
            record.ticket_number = str(ticket_df['tn'].iloc[0])
            record.create_time = str(ticket_df['create_time'].iloc[0])
            record.closed = str(ticket_df['closed'].iloc[0])
            # group rows by municipality name:
            data[ticket_df[ticket_df['field_id'] == 14]['value_text'].iloc[0]].append(record.__dict__)
        self.form = dict(data)

    def form_to_file(self):
        column_range = 'A:I'
        header_merge_range = 'A1:I1'
        # one combined workbook...
        self.form_to_excel_by_territory(column_range, header_merge_range)
        # ...plus one standalone workbook per municipality:
        folder_path = os.path.join(self.result_folder_path, self.form_verbose_name, self.municipalities)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        for record in self.form.items():
            file_name = record[0] + '.xlsx'
            workbook = xlsxwriter.Workbook(os.path.join(folder_path, file_name))
            worksheet = workbook.add_worksheet()
            worksheet.set_column(column_range, 20)
            worksheet.set_row(0, 30)
            worksheet.set_row(1, 80)
            header_format = self.get_header_format(workbook)
            row_format = self.get_row_format(workbook)
            worksheet.merge_range(header_merge_range, record[0], header_format)
            for idx, key in enumerate(self.header):
                worksheet.write(1, idx, key, header_format)
            for row_idx, data in enumerate(record[1], start=2):
                for col_idx, (key, value) in enumerate(data.items()):
                    worksheet.write(row_idx, col_idx, value, row_format)
            workbook.close()


class ReportForm543(Report):
    """Per-municipality ticket statistics: totals, closed on time / expired,
    and the share of tickets closed within the deadline."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.form_name = 'REPORT_FORM_54_3'
        self.init_dates()
        self.form_verbose_name = 'Форма_5_4_3'
        self.header = [
            'Наименование ОМСУ',
            'Количество заявок',
            'Количество закрытых заявок',
            'Количество вовремя закрытых заявок',
            'Количество просроченных закрытых заявок',
            'Количество просроченных открытых заявок',
            'Количество открытых непросроченных заявок',
            'Процент закрытых заявок в течение 5 дней от количества закрытых заявок',
        ]

    def get_data_from_db(self, filename='form_54_2.sql', *args):
        # NOTE(review): deliberately(?) reuses form_54_2.sql — confirm this
        # report is meant to share the 5.4.2 query.
        super(ReportForm543, self).get_data_from_db(filename, self.start_date, self.end_date)

    def data_to_form_template(self):
        df = pd.DataFrame.from_records(self.data)
        if df.empty:
            return
        ticket_ids = list(set(df['ticket_id']))
        data = dict()
        data['Итого'] = {'total_tickets': 0, 'closed': 0, 'closed_on_time': 0, 'expired_closed': 0,
                         'expired_open': 0, 'opened_not_expired': 0, 'percent': 0}
        for _id in ticket_ids:
            ticket_df = df[df['ticket_id'] == _id]
            # municipality name comes from dynamic field 14; tickets without
            # it are skipped entirely:
            if not ticket_df[ticket_df['field_id'] == 14]['value_text'].empty:
                name = ticket_df[ticket_df['field_id'] == 14]['value_text'].item()
                if name not in data.keys():
                    data[name] = {'total_tickets': 0, 'closed': 0, 'closed_on_time': 0, 'expired_closed': 0,
                                  'expired_open': 0, 'opened_not_expired': 0, 'percent': 0}
            else:
                continue
            create_time = ticket_df[ticket_df['field_id'] == 14]['create_time'].astype(str).item()
            is_new_ticket = False
            # a start_date containing a time component has two tokens:
            if self.start_date.split().__len__() == 2:
                # NOTE(review): '%Y-%m-%d %H-%M' (hyphen between hour and
                # minute) is unusual — confirm the START_DATE format in
                # report_dates.ini really uses hyphens.
                if datetime.datetime.strptime(create_time, '%Y-%m-%d %H:%M:%S') > \
                        datetime.datetime.strptime(self.start_date, '%Y-%m-%d %H-%M'):
                    data[name]['total_tickets'] += 1
                    data['Итого']['total_tickets'] += 1
                    is_new_ticket = True
            else:
                if datetime.datetime.strptime(create_time, '%Y-%m-%d %H:%M:%S') > \
                        datetime.datetime.strptime(self.start_date, '%Y-%m-%d'):
                    data[name]['total_tickets'] += 1
                    data['Итого']['total_tickets'] += 1
                    is_new_ticket = True
            max_working_days = get_max_working_days(create_time)
            last_action_time = ticket_df[ticket_df['field_id'] == 14]['last_action_time'].astype(str).item()
            ticket_state_id = ticket_df[ticket_df['field_id'] == 14]['ticket_state_id'].item()
            # state ids 2/3/10 are treated as "closed" states here:
            if ticket_state_id in (2, 3, 10) and last_action_time:
                data[name]['closed'] += 1
                data['Итого']['closed'] += 1
                if compute_working_time(create_time, last_action_time) <= int(max_working_days) * 24:
                    data[name]['closed_on_time'] += 1
                    data['Итого']['closed_on_time'] += 1
                else:
                    data[name]['expired_closed'] += 1
                    data['Итого']['expired_closed'] += 1
            elif compute_working_time(create_time, CURRENT_DATE_TIME) > int(max_working_days) * 24 and is_new_ticket:
                data[name]['expired_open'] += 1
                data['Итого']['expired_open'] += 1
            elif is_new_ticket:
                data[name]['opened_not_expired'] += 1
                data['Итого']['opened_not_expired'] += 1
            if data[name]['closed'] == 0:
                data[name]['percent'] = 0
            else:
                data[name]['percent'] = data[name]['closed_on_time'] / data[name]['closed']
        if data['Итого']['closed'] != 0:
            data['Итого']['percent'] = data['Итого']['closed_on_time']
            # NOTE(review): SOURCE chunk is truncated here mid-expression
            # (presumably "/ data['Итого']['closed']" follows in the
            # original file).
# NOTE(review): this chunk begins mid-way through an iterator method of the
# FilterCommands class (the `for` keyword and loop variable prefix are missing);
# the truncated loop header is reproduced verbatim below.
__, entity in entities:
            if not entity and not with_disabled:
                continue
            if with_disabled:
                # Include disabled versions of the entity as well.
                for version in entity.all_versions:
                    if version.is_renderable(allow_disabled=True):
                        yield version
            else:
                if entity.is_renderable():
                    yield entity

    @classmethod
    def list_content(cls, holder, entity_class, with_disabled):
        """Print each renderable entity of `holder` as one JSON line."""
        for entity in cls.iter_content(holder, entity_class, with_disabled):
            print(FC.get_args_as_json(entity))


# Short alias used by every CLI command below; combine_options() builds the
# reusable decorator set stored in FC.options.
FC = FilterCommands
FC.combine_options()


@cli.command()
@FC.options["directory"]
@FC.options["verbosity"]
def get_deck_info(directory):
    """Get some information about the deck"""
    try:
        info = Manager.get_info_from_model_file(directory)
        # Strip hardware-specific entries before printing.
        for key in ("device", "flip_horizontal", "flip_vertical", "rotation"):
            del info[key]
        print(json.dumps(info))
    except Exception:
        # NOTE(review): the closing double quote after {directory} is missing
        # from this message (compare the other commands) — looks like a typo.
        Manager.exit(1, f'Unable to read information from directory "{directory}')


@cli.command()
@FC.options["directory"]
@FC.options["verbosity"]
def get_current_page(directory):
    """Get the current page"""
    try:
        page_info = json.loads((Path(directory) / Deck.current_page_file_name).read_text().strip())
        # Validate the exact schema: {"number", "name", "is_overlay"}.
        if set(page_info.keys()) != {"number", "name", "is_overlay"}:
            raise ValueError
        if (number := page_info["number"]) is None:
            # No current page: name and is_overlay must also be null.
            if page_info["name"] is not None or page_info["is_overlay"] is not None:
                raise ValueError
        else:
            if (
                not isinstance(number, int)
                or not isinstance(page_info["name"], (str, NoneType))
                or not isinstance(page_info["is_overlay"], bool)
            ):
                raise ValueError
        print(json.dumps(page_info))
    except Exception:
        Manager.exit(1, f'Unable to read current page information from directory "{directory}"')


@cli.command()
@FC.options["directory"]
@cloup.option(
    "-p",
    "--page",
    "page_filter",
    type=str,
    required=True,
    help="A page number or a name, or one of " + ", ".join(f'"{page_code}"' for page_code in PAGE_CODES),
)
@FC.options["verbosity"]
def set_current_page(directory, page_filter):
    """Set the current page"""
    if page_filter not in PAGE_CODES:
        # Called only for validation: exits if the page does not exist.
        FC.find_page(
            FC.get_deck(
                directory,
                key_filter=FILTER_DENY,
                event_filter=FILTER_DENY,
                layer_filter=FILTER_DENY,
                text_line_filter=FILTER_DENY,
            ),
            page_filter,
        )
    path = Path(directory) / Deck.set_current_page_file_name
    try:
        # Remove-then-write so the running process sees a fresh file event.
        if path.exists():
            path.unlink()
        path.write_text(page_filter)
    except Exception:
        Manager.exit(1, f'Unable to write current page information into directory "{directory}"')


@cli.command()
@FC.options["directory"]
@FC.options["verbosity"]
def get_brightness(directory):
    """Get the current deck brightness"""
    try:
        path = Path(directory) / Deck.current_brightness_file_name
        print(int(path.read_text().strip()))
    except Exception:
        Manager.exit(1, f'Unable to read current brightness information from directory "{directory}"')


@cli.command()
@FC.options["directory"]
@cloup.option(
    "-b",
    "--brightness",
    "level",
    type=int,
    required=True,
    help="Brightness level, from 0 (no light) to 100 (brightest)",
    callback=FC.validate_brightness_level,
)
@FC.options["verbosity"]
def set_brightness(directory, level):
    """Set the deck brightness"""
    path = Path(directory) / Deck.current_brightness_file_name
    try:
        if path.exists():
            path.unlink()
        path.write_text(str(level))
    except Exception:
        Manager.exit(1, f'Unable to write brightness information into directory "{directory}"')


@cli.command()
@FC.options["directory"]
@FC.options["disabled_flag"]
@FC.options["verbosity"]
def list_pages(directory, with_disabled):
    """List the page of the deck"""
    # Everything below page level is filtered out: only pages are loaded.
    deck = FC.get_deck(
        directory,
        key_filter=FILTER_DENY,
        event_filter=FILTER_DENY,
        layer_filter=FILTER_DENY,
        text_line_filter=FILTER_DENY,
    )
    FC.list_content(deck, Page, with_disabled=with_disabled)


@cli.command()
@FC.options["page_filter"]
@FC.options["verbosity"]
def get_page_path(directory, page_filter):
    """Get the path of a page."""
    page = FC.find_page(
        FC.get_deck(
            directory,
            key_filter=FILTER_DENY,
            event_filter=FILTER_DENY,
            layer_filter=FILTER_DENY,
            text_line_filter=FILTER_DENY,
        ),
        page_filter,
    )
    print(page.path)


@cli.command()
@FC.options["page_filter_with_names"]
@FC.options["verbosity"]
def get_page_conf(directory, page_filter, names):
    """Get the configuration of a page, in json."""
    page = FC.find_page(
        FC.get_deck(
            directory,
            key_filter=FILTER_DENY,
            event_filter=FILTER_DENY,
            layer_filter=FILTER_DENY,
            text_line_filter=FILTER_DENY,
        ),
        page_filter,
    )
    # `names or None` => an empty tuple/list means "all entries".
    print(FC.get_args_as_json(page, names or None))


@cli.command()
@FC.options["page_filter_with_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def set_page_conf(directory, page_filter, names_and_values, dry_run):
    """Set the value of some entries of a page configuration."""
    page = FC.find_page(
        FC.get_deck(
            directory,
            key_filter=FILTER_DENY,
            event_filter=FILTER_DENY,
            layer_filter=FILTER_DENY,
            text_line_filter=FILTER_DENY,
        ),
        page_filter,
    )
    # Config changes are applied by renaming the entity's conf-bearing
    # filename; [1] picks the printed message from the returned pair.
    print(FC.rename_entity(page, FC.get_update_args_filename(page, names_and_values), dry_run=dry_run)[1])


# Shared help text for the page-number expression accepted by create/copy/move.
PAGE_NUMBER_EXPRESSION_HELP = """\
Expression can be an empty string (same as not passing this option) to use the next available page in order, or "NUMBER+" for the first page available after this number, or "NUMBER+NUMBER" for the first page available between the two numbers (exclusive), or "?" for a random available page, or "NUMBER? for a random available page after this number, or "?NUMBER" for a random available page before this number, or "NUMBER?NUMBER" for a random available page between the two numbers (exclusive). If no available page match the request, a error will be raised."""


@cli.command()
@FC.options["directory"]
@cloup.option(
    "-p",
    "--page",
    type=str,
    required=False,
    help="The page number or an expression to find one available." + PAGE_NUMBER_EXPRESSION_HELP,
    callback=FC.validate_number_expression,
)
@FC.options["optional_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def create_page(directory, page, names_and_values, dry_run):
    """Create a new page with configuration"""
    deck = FC.get_deck(
        directory,
        key_filter=FILTER_DENY,
        event_filter=FILTER_DENY,
        layer_filter=FILTER_DENY,
        text_line_filter=FILTER_DENY,
    )
    # `page` is unpacked — presumably validate_number_expression returns a
    # tuple understood by get_one_page; TODO confirm.
    print(FC.create_page(deck, FC.get_one_page(deck, *page), names_and_values, dry_run=dry_run))


@cli.command()
@FC.options["page_filter"]
@cloup.option(
    "-tp",
    "--to-page",
    "to_page_number",
    type=str,
    required=False,
    help="The page number of the new page or an expression to find one available." + PAGE_NUMBER_EXPRESSION_HELP,
    callback=FC.validate_number_expression,
)
@FC.options["optional_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def copy_page(directory, page_filter, to_page_number, names_and_values, dry_run):
    """Copy a page and all its content"""
    deck = FC.get_deck(
        directory,
        key_filter=FILTER_DENY,
        event_filter=FILTER_DENY,
        layer_filter=FILTER_DENY,
        text_line_filter=FILTER_DENY,
    )
    page = FC.find_page(deck, page_filter)
    print(FC.copy_page(page, FC.get_one_page(deck, *to_page_number), names_and_values, dry_run=dry_run))


@cli.command()
@FC.options["page_filter"]
@cloup.option(
    "-tp",
    "--to-page",
    "to_page_number",
    type=str,
    required=False,
    help="The page number of the new page or an expression to find one available." + PAGE_NUMBER_EXPRESSION_HELP,
    callback=FC.validate_number_expression,
)
@FC.options["optional_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def move_page(directory, page_filter, to_page_number, names_and_values, dry_run):
    """Move a page to a different number"""
    deck = FC.get_deck(
        directory,
        key_filter=FILTER_DENY,
        event_filter=FILTER_DENY,
        layer_filter=FILTER_DENY,
        text_line_filter=FILTER_DENY,
    )
    page = FC.find_page(deck, page_filter)
    print(FC.move_page(page, FC.get_one_page(deck, *to_page_number), names_and_values, dry_run=dry_run))


@cli.command()
@FC.options["page_filter"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def delete_page(directory, page_filter, dry_run):
    """Fully delete a page directory."""
    page = FC.find_page(
        FC.get_deck(
            directory,
            key_filter=FILTER_DENY,
            event_filter=FILTER_DENY,
            layer_filter=FILTER_DENY,
            text_line_filter=FILTER_DENY,
        ),
        page_filter,
    )
    print(FC.delete_entity(page, dry_run=dry_run))


@cli.command()
@FC.options["page_filter"]
@FC.options["disabled_flag"]
@FC.options["verbosity"]
def list_keys(directory, page_filter, with_disabled):
    """List the keys of a page"""
    # Keys must be loaded here, so no key_filter is passed to get_deck.
    page = FC.find_page(
        FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY),
        page_filter,
    )
    FC.list_content(page, Key, with_disabled=with_disabled)


@cli.command()
@FC.options["key_filter"]
@FC.options["verbosity"]
def get_key_path(directory, page_filter, key_filter):
    """Get the path of a key."""
    key = FC.find_key(
        FC.find_page(
            FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY),
            page_filter,
        ),
        key_filter,
    )
    print(key.path)


@cli.command()
@FC.options["key_filter_with_names"]
@FC.options["verbosity"]
def get_key_conf(directory, page_filter, key_filter, names):
    """Get the configuration of a key, in json."""
    key = FC.find_key(
        FC.find_page(
            FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY),
            page_filter,
        ),
        key_filter,
    )
    print(FC.get_args_as_json(key, names or None))


@cli.command()
@FC.options["key_filter_with_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def set_key_conf(directory, page_filter, key_filter, names_and_values, dry_run):
    """Set the value of some entries of a key configuration."""
    key = FC.find_key(
        FC.find_page(
            FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY),
            page_filter,
        ),
        key_filter,
    )
    print(FC.rename_entity(key, FC.get_update_args_filename(key, names_and_values), dry_run=dry_run)[1])


# Shared help text for the key-position expression accepted by create/copy/move.
KEY_EXPRESSION_HELP = """\
Expression can be '+' to use the next available key in order (row by row), or "?" for a random available key. If no available key match the request, a error will be raised."""


@cli.command()
@FC.options["page_filter"]
@cloup.option(
    "-k",
    "--key",
    type=str,
    required=True,
    help='The key position as "row,col" or an expression to find an available key.' + KEY_EXPRESSION_HELP,
    callback=FC.validate_key_expression,
)
@FC.options["optional_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def create_key(directory, page_filter, key, names_and_values, dry_run):
    """Create a new key with configuration"""
    page = FC.find_page(
        FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY),
        page_filter,
    )
    # Default to "+" (next free slot) when no position was given.
    to_row, to_col = FC.get_one_key(page, key or "+")
    print(FC.create_key(page, to_row, to_col, names_and_values, dry_run=dry_run))


@cli.command()
@FC.options["key_filter"]
@FC.options["to_page"]
@cloup.option(
    "-tk",
    "--to-key",
    "to_key",
    type=str,
    required=False,
    help='The optional destination key position as "row,col" or an expression to find an available key.' + KEY_EXPRESSION_HELP,
    callback=FC.validate_key_expression,
)
@FC.options["optional_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def copy_key(directory, page_filter, key_filter, to_page_filter, to_key, names_and_values, dry_run):
    """Copy a key and all its content"""
    deck = FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY)
    key = FC.find_key(FC.find_page(deck, page_filter), key_filter)
    # Destination defaults to the source page / source position.
    to_page = FC.find_page(deck, to_page_filter) if to_page_filter else key.page
    to_row, to_col = FC.get_one_key(to_page, to_key) or key.key
    print(FC.copy_key(key, to_page, to_row, to_col, names_and_values, dry_run=dry_run))


@cli.command()
@FC.options["key_filter"]
@FC.options["to_page"]
@cloup.option(
    "-tk",
    "--to-key",
    "to_key",
    type=str,
    required=False,
    help='The optional destination key position as "row,col" or an expression to find an available key.' + KEY_EXPRESSION_HELP,
    callback=FC.validate_key_expression,
)
@FC.options["optional_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def move_key(directory, page_filter, key_filter, to_page_filter, to_key, names_and_values, dry_run):
    """Move a key to another page and/or a different position"""
    deck = FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY)
    key = FC.find_key(FC.find_page(deck, page_filter), key_filter)
    to_page = FC.find_page(deck, to_page_filter) if to_page_filter else key.page
    to_row, to_col = FC.get_one_key(to_page, to_key) or key.key
    print(FC.move_key(key, to_page, to_row, to_col, names_and_values, dry_run=dry_run))


@cli.command()
@FC.options["key_filter"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def delete_key(directory, page_filter, key_filter, dry_run):
    """Fully delete of a key directory."""
    key = FC.find_key(
        FC.find_page(
            FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY),
            page_filter,
        ),
        key_filter,
    )
    print(FC.delete_entity(key, dry_run=dry_run))


@cli.command()
@FC.options["key_filter"]
@FC.options["disabled_flag"]
@FC.options["verbosity"]
def list_images(directory, page_filter, key_filter, with_disabled):
    """List the image layers of a key"""
    # Layers must be loaded here, so no layer_filter is passed to get_deck.
    key = FC.find_key(
        FC.find_page(FC.get_deck(directory, event_filter=FILTER_DENY, text_line_filter=FILTER_DENY), page_filter),
        key_filter,
    )
    FC.list_content(key, KeyImageLayer, with_disabled=with_disabled)


@cli.command()
@FC.options["layer_filter"]
@FC.options["verbosity"]
def get_image_path(directory, page_filter, key_filter, layer_filter):
    """Get the path of an image layer."""
    layer = FC.find_layer(
        FC.find_key(
            FC.find_page(FC.get_deck(directory, event_filter=FILTER_DENY, text_line_filter=FILTER_DENY), page_filter),
            key_filter,
        ),
        layer_filter,
    )
    print(layer.path)


@cli.command()
@FC.options["layer_filter_with_names"]
@FC.options["verbosity"]
def get_image_conf(directory, page_filter, key_filter, layer_filter, names):
    """Get the configuration of an image layer, in json."""
    layer = FC.find_layer(
        FC.find_key(
            FC.find_page(FC.get_deck(directory, event_filter=FILTER_DENY, text_line_filter=FILTER_DENY), page_filter),
            key_filter,
        ),
        layer_filter,
    )
    print(FC.get_args_as_json(layer, names or None))


@cli.command()
@FC.options["layer_filter_with_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def set_image_conf(directory, page_filter, key_filter, layer_filter, names_and_values, dry_run):
    """Set the value of some entries of an image configuration."""
    layer = FC.find_layer(
        FC.find_key(
            FC.find_page(FC.get_deck(directory, event_filter=FILTER_DENY, text_line_filter=FILTER_DENY), page_filter),
            key_filter,
        ),
        layer_filter,
    )
    # NOTE(review): this calls FC.update_filename while set_page_conf and
    # set_key_conf call FC.get_update_args_filename — confirm both helpers
    # exist; this may be an outdated call site.
    print(FC.rename_entity(layer, FC.update_filename(layer, names_and_values), dry_run=dry_run)[1])


@cli.command()
@FC.options["key_filter"]
@FC.options["optional_names_and_values"]
@FC.options["link"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def create_image(directory, page_filter, key_filter, names_and_values, link, dry_run):
    """Create a new image layer with configuration"""
    key = FC.find_key(
        FC.find_page(
            FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY, text_line_filter=FILTER_DENY),
            page_filter,
        ),
        key_filter,
    )
    print(FC.create_layer(key, names_and_values, link, dry_run=dry_run))


@cli.command()
@FC.options["layer_filter"]
@FC.options["to_page_to_key"]
@FC.options["optional_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def copy_image(
    directory, page_filter, key_filter, layer_filter, to_page_filter, to_key_filter, names_and_values, dry_run
):
    """Copy an image layer"""
    deck = FC.get_deck(directory, event_filter=FILTER_DENY, text_line_filter=FILTER_DENY)
    layer = FC.find_layer(FC.find_key(FC.find_page(deck, page_filter), key_filter), layer_filter)
    to_page = FC.find_page(deck, to_page_filter) if to_page_filter else layer.page
    if to_key_filter:
        to_key = FC.find_key(to_page, to_key_filter)
    elif to_page_filter:
        # Same "row,col" position on the destination page as the source key.
        to_key = FC.find_key(to_page, "%s,%s" % layer.key.key)
    else:
        to_key = layer.key
    print(FC.copy_layer(layer, to_key, names_and_values, dry_run=dry_run))


@cli.command()
@FC.options["layer_filter"]
@FC.options["to_page_to_key"]
@FC.options["optional_names_and_values"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def move_image(
    directory, page_filter, key_filter, layer_filter, to_page_filter, to_key_filter, names_and_values, dry_run
):
    """Move a layer to another key"""
    deck = FC.get_deck(directory, event_filter=FILTER_DENY, text_line_filter=FILTER_DENY)
    layer = FC.find_layer(FC.find_key(FC.find_page(deck, page_filter), key_filter), layer_filter)
    to_page = FC.find_page(deck, to_page_filter) if to_page_filter else layer.page
    if to_key_filter:
        to_key = FC.find_key(to_page, to_key_filter)
    elif to_page_filter:
        to_key = FC.find_key(to_page, "%s,%s" % layer.key.key)
    else:
        to_key = layer.key
    print(FC.move_layer(layer, to_key, names_and_values, dry_run=dry_run))


@cli.command()
@FC.options["layer_filter"]
@FC.options["dry_run"]
@FC.options["verbosity"]
def delete_image(directory, page_filter, key_filter, layer_filter, dry_run):
    """Delete an image layer."""
    layer = FC.find_layer(
        FC.find_key(
            FC.find_page(FC.get_deck(directory, event_filter=FILTER_DENY, text_line_filter=FILTER_DENY), page_filter),
            key_filter,
        ),
        layer_filter,
    )
    print(FC.delete_entity(layer, dry_run=dry_run))


@cli.command()
@FC.options["key_filter"]
@FC.options["disabled_flag"]
@FC.options["verbosity"]
def list_texts(directory, page_filter, key_filter, with_disabled):
    """List the text lines of a key"""
    key = FC.find_key(
        FC.find_page(FC.get_deck(directory, event_filter=FILTER_DENY, layer_filter=FILTER_DENY), page_filter),
        key_filter,
    )
    FC.list_content(key, KeyTextLine, with_disabled=with_disabled)


# NOTE(review): the chunk is truncated below — the docstring of get_text_path
# is cut mid-sentence and the function body is missing.
@cli.command()
@FC.options["text_line_filter"]
@FC.options["verbosity"]
def get_text_path(directory, page_filter, key_filter, text_line_filter):
    """Get the path of an
""" """ __author__ = "<NAME>" __version__ = "0.0" import sys import time import os import math import argparse import numpy as np import matplotlib.pyplot as plt import matplotlib.lines as lines import scipy.optimize as opt import utils.fileio as fileio import utils.display as display import utils.model as model import utils.data as data import utils.fit as fit ROOT_8_LOG_2 = (8 * math.log(2)) ** 0.5 ONE_OVER_ROOT_TAU = 1 / (2 * math.pi) # comparisons for floats iseq = lambda f1,f2,eps=1e-9:abs(f1-f2)<eps # convenient way to discard values which are zero or negative positive = lambda ar:ar[ar>0] n_squish_pars = 2 # exponential thingy squish_p0 = [0.1, 160.0] k = 2 sat_crx = lambda A1,b,a:A1*(1+b*np.exp(-(a*1000/A1)**k)) sat_crx_jac = lambda A1,b,a:1+(1-k*(a*1000/A1)**k)*b*np.exp(-(a*1000/A1)**k) # # one over exponential thingy # squish_p0 = [0.1, 160] # k=2 # sat_crx = lambda A1,b,a:A1 / (1 - b*np.exp(-(a*1000/A1)**k)) # sat_crx_jac = lambda A1,b,a:(1 - b*np.exp(-(a*1000/A1)**k)*(k*(a/A1)**k - 1)) / ((1 - b*np.exp(-(a*1000/A1)**k))**2) # # # power law thingy # squish_p0 = [0.01, 160] # k = -2 # sat_crx = lambda A1,b,a:A1*(1 + b*(a*1000/A1)**k) # sat_crx_jac = lambda A1,b,a:1 + (1-k)*b*(a*1000/A1)**k # # tanh # squish_p0 = [1.0, 60.0] # sat_crx = lambda A1,b,a:b*(a*1000)*np.sinh(A1/(a*1000)) # sat_crx_jac = lambda A1,b,a:b*(a*1000)*np.cosh(A1/(a*1000))/A1 # class transformer(object): def __init__(self, en, en_edges=None, ar_edges=None): self.en = en self.en_edges = en_edges self.ar_edges = ar_edges if (en_edges is not None) and (ar_edges is not None): self.setup() def __call__(self, ar_mids, *params, en_counts=None): # use internal values or broadcast supplied values if en_counts is None: dd_en_counts = self.dd_en_counts else: dd_en_counts = en_counts[None,:] if ar_mids is None: dd_ar_mids = self.dd_ar_mids else: dd_ar_mids = ar_mids[:,None] # unpack parameters C, gamma, *res_s, rho_p, ps0, ps1 = params # calculate pieces dd_mu = self.dd_en_mids * gamma dd_sigma = 
self.res(self.dd_en_mids, *params) * dd_mu # # calculate inverse transformed area midpoints dd_ar_mids_it = sat_crx(dd_ar_mids, ps0, ps1) jacobian_it = sat_crx_jac(dd_ar_mids, ps0, ps1) # mult = C * ONE_OVER_ROOT_TAU * (dd_en_counts / dd_sigma) * jacobian_it expo = np.exp((-0.5) * ((dd_ar_mids_it - dd_mu) / (dd_sigma))**2) dd_ar_counts = mult*expo # sum along energy axis to get area counts ar_counts = dd_ar_counts.sum(1) return ar_counts def res(self, en, *params): # unpack C, gamma, *res_s, rho_p, ps0, ps1 = params rs0 = res_s[0] rs1 = res_s[1] # PMT resolution res_p_squared = rho_p / en # scintillator resolution # res_s_squared = rs0**2 res_s_squared = rs0 - rs1*en return np.sqrt(abs(res_s_squared) + abs(res_p_squared)) def setup(self, en_edges=None, ar_edges=None): """performs binning and calculates static objects""" # assume new edges if specified if en_edges is not None: self.en_edges = en_edges if ar_edges is not None: self.ar_edges = ar_edges # if edges were specified at some point if (self.en_edges is not None) and (self.ar_edges is not None): self._bin() # edges not specified here or in init else: raise ValueError("need values for ar_edges and en_edges to perform setup") def _bin(self): """calculates and stores bin midpoints and counts""" # energy midpoints and counts self.en_counts, en_edges = np.histogram(self.en, self.en_edges) if not iseq(0, (en_edges-self.en_edges).sum()): raise Warning("given and returned bin edges do not match!") self.en_mids = 0.5*(en_edges[1:] + en_edges[:-1]) self.en_res = self.en_mids.size # area midpoints self.ar_mids = 0.5*(self.ar_edges[1:] + self.ar_edges[:-1]) self.ar_res = self.ar_mids.size # 2d arrays # axis 0 = area # axis 1 = energy self.dd_en_counts = np.zeros([self.ar_res, self.en_res]) self.dd_en_counts[:,:] = self.en_counts[None,:] self.dd_en_mids = np.zeros([self.ar_res, self.en_res]) self.dd_en_mids[:,:] = self.en_mids[None,:] self.dd_ar_mids = np.zeros([self.ar_res, self.en_res]) self.dd_ar_mids[:,:] = 
self.ar_mids[:,None] class transformer_mixer(transformer): """transformer subclass which also mixes in a background spectrum""" def __init__(self, en, en_edges, bg, ar_edges): self.bg = bg super(transformer_mixer, self).__init__(en, en_edges, ar_edges) # if ar_edges is not None: # self.setup_background() def setup(self,*args,**kwargs): super(transformer_mixer,self).setup(*args,**kwargs) if self.ar_edges is not None: self.setup_background() def setup_background(self): if self.ar_edges is not None: self._bin_background() else: raise ValueError("need ar_edges to perform setup_background") def _bin_background(self): self.bg_counts, bg_edges = np.histogram(self.bg, self.ar_edges) if not iseq(0, (bg_edges-self.ar_edges).sum()): raise Warning("given and returned bin values do not match!") def __call__(self, ar_mids, *params, en_counts=None, bg_counts=None, incl_src=True, incl_bg=True): """calculate area""" # unpack parameters C, D, gamma, *res_s, rho_p, ps0, ps1 = params # calculate simulated source contribution p_src = [C, gamma, *res_s, rho_p, ps0, ps1] res_src = super(transformer_mixer,self).__call__(ar_mids, *p_src, en_counts=en_counts) # calculate background contribution res_bg = D * (bg_counts if bg_counts is not None else self.bg_counts) # return sum of source and background contributions if incl_src and incl_bg: return res_src + res_bg elif incl_src: return res_src elif incl_bg: return res_bg else: return 0 class routine(object): verbosity = 2 ch_comp = [1,2,3] ch_load = [1,2,3,4] # specify expermental source data dir_exp_data = '../xrd-analysis/data/root/scintillator' exp_runs = { "Am241":4291, "Ba133":4293, "Cd109":4292, "Co57" :4294, "Mn54" :0, "Na22" :0, } exp_file = "Run{}.root" # specify background data dir_bg_data = '../xrd-analysis/data/root/scintillator' bg_runs = [4225, 4226] bg_file = "Run{}.root" # specify simulation data dir_sim_data = '../xrd-analysis/data/root/simulation' # sim_sources = ["Am241", "Ba133", "Cd109", "Co57", "Mn54", "Na22"] # 
sim_sources = ["Am241","Ba133","Cd109","Co57"] sim_sources = ["Am241","Cd109","Ba133"] # sim_sources = ["Am241"] sim_file = "{}.root" # ranges capturing the entire range of each source's spectrum src_area_range_full = { "Am241": [10000, 85000], "Ba133": [ 5000, 520000], "Cd109": [10000, 130000], "Co57" : [ 5000, 200000], "Mn54" : [10000, 1200000], "Na22" : [10000, 800000], } src_energy_range_full = { "Am241": [0, 60], "Ba133": [0, 380], "Cd109": [0, 90], "Co57" : [0, 140], "Mn54" : [0, 900], "Na22" : [0, 520], } # comparison range for source spectra vs. sim+bg src_range_default = [10000,1000000] # # restrictive ranges, for excluding PMT saturation and low-tail of Am241 # src_area_range = { # "Am241":{1:[31000,70000], 2:[32000,72000], 3:[33000,75000]}, # "Ba133":{1:[10000,55000], 2:[10000,55000], 3:[10000,55000]}, # "Cd109":{1:[10000,60000], 2:[10000,60000], 3:[10000,60000]}, # "Co57" :{1:[12000,65000], 2:[12000,65000], 3:[12000,65000]}, # "Mn54" :[], # "Na22" :[], # } # # less restrictive, but still mostly excluding PMT saturation # src_area_range = { # "Am241":{1:[11000,80000], 2:[11000,80000], 3:[11000,80000]}, # "Ba133":{1:[11000,80000], 2:[11000,80000], 3:[11000,80000]}, # "Cd109":{1:[11000,80000], 2:[11000,80000], 3:[11000,80000]}, # "Co57" :{1:[11000,80000], 2:[11000,80000], 3:[11000,80000]}, # "Mn54" :[], # "Na22" :[], # } # including pmt saturation range src_area_range = { "Am241":{1:[11000, 90000], 2:[11000, 90000], 3:[11000, 90000]}, "Ba133":{1:[11000,600000], 2:[11000,600000], 3:[11000,600000]}, "Cd109":{1:[11000,130000], 2:[11000,130000], 3:[11000,130000]}, "Co57" :{1:[11000,200000], 2:[11000,200000], 3:[11000,200000]}, "Mn54" :[], "Na22" :[], } # source peak energies for plotting vlines peak_e = { "Am241": [ 26.34, 59.54], "Ba133": [ 30.85, 81.00], "Cd109": [ 22.1 , 88.04], "Co57" : [122.06, 136.47], "Mn54" : [834.85 ], "Na22" : [511.0 ], } # conversion parameters # just guesses which should be close enough for optimization gamma = { 1: 65835.2 / 60.0, 
2: 67874.3 / 60.0, 3: 35789.0 / 30.0, 4: 33384.1 / 60.0, } res_s = {1:0.060, 2:0.080, 3:0.075} rp_rref = {1:0.035, 2:0.034, 3:0.038} rp_eref = {1:122 , 2:122 , 3:122 } n_bg_guess = { "Am241":{1: 6000, 2: 6000, 3: 6000}, "Ba133":{1:10000, 2:10000, 3:10000}, "Cd109":{1: 4000, 2: 4000, 3: 4000}, "Co57" :{1: 3000, 2: 1000, 3: 1000}, } n_src_guess = { "Am241":{1: 15000, 2:15000, 3:15000}, "Ba133":{1: 75000, 2:75000, 3:75000}, "Cd109":{1: 70000, 2:70000, 3:70000}, "Co57" :{1: 100, 2: 1000, 3: 1000}, } # discard events with area in channel 4 (LYSO) greater than this # todo: by channel lyso_cut_hi = 14000.0 lyso_branch = "area_3046_4" exp_area_pvs = "area_3046_{}" sim_edep_mev = "Edep_MeV_Si{}" sim_edep_kev = "e{}" sim_area_pvs = "a{}" # transformer resolution for energy and area xf_res_en = 200 xf_res_ar = 200 def vp(self, level, *args, **kwargs): """verbosity print. print if verbosity >= level.""" if self.verbosity >= level: print(*args, **kwargs) def __init__(self, ): self.rho_p = {c:(self.rp_eref[c] * self.rp_rref[c]**2) for c in self.ch_comp} # branches to load for energy and area self.branches_a = {self.exp_area_pvs.format(n) for n in self.ch_load} self.branches_e = {self.sim_edep_mev.format(n) for n in self.ch_comp} # compose lists and dicts of files to be loaded self.exp_files = {s:os.sep.join([self.dir_exp_data, self.exp_file.format(r)]) for s,r in self.exp_runs.items() if r} self.sim_files = {_:os.sep.join([self.dir_sim_data, self.sim_file.format(_)]) for _ in self.sim_sources} self.bg_files = [os.sep.join([self.dir_exp_data, self.exp_file.format(r)]) for r in self.bg_runs] # compose lists of sources self.sim_sources = sorted(self.sim_sources) self.exp_sources = sorted(self.exp_runs.keys()) self.all_sources = sorted(set(self.exp_sources) & set(self.sim_sources)) self.any_sources = sorted(set(self.exp_sources) | set(self.sim_sources)) self.source_index = {s:i for i,s in enumerate(self.any_sources)} # todo: accept args to determine currently hard-coded parameters 
def procure_data(self):
    """load data and process into forms needed by the rest of the routine"""
    # NOTE(review): this method is truncated at the end of this chunk (inside a
    # commented-out diagnostic); the remainder is not visible here.

    # calculate bin edges and midpoints
    #
    # full ranges, which don't depend on channel (yet)
    self.en_edges_full = {}
    self.ar_edges_full = {}
    self.en_mids_full = {}
    self.ar_mids_full = {}
    #
    # comparison ranges, which depend on source and channel
    self.en_edges = {_:{} for _ in self.sim_sources}
    self.ar_edges = {_:{} for _ in self.sim_sources}
    self.en_mids = {_:{} for _ in self.sim_sources}
    self.ar_mids = {_:{} for _ in self.sim_sources}
    for src in self.sim_sources:
        self.en_edges_full[src] = data.edges_lin(*self.src_energy_range_full[src],self.xf_res_en)
        self.ar_edges_full[src] = data.edges_lin(*self.src_area_range_full[ src],self.xf_res_ar)
        # midpoints are the average of adjacent edges
        self.en_mids_full[src] = 0.5 * (self.en_edges_full[src][1:] + self.en_edges_full[src][:-1])
        self.ar_mids_full[src] = 0.5 * (self.ar_edges_full[src][1:] + self.ar_edges_full[src][:-1])
        for ch in self.ch_comp:
            # fall back to src_range_default when no per-source/channel area range is set
            self.ar_edges[src][ch] = data.edges_lin(*self.src_area_range.get(src,{}).get(ch,self.src_range_default),self.xf_res_ar)
            self.ar_mids[src][ch] = 0.5 * (self.ar_edges[src][ch][1:] + self.ar_edges[src][ch][:-1])
            # energy comparison range is the same as the full range
            self.en_edges[src][ch] = self.en_edges_full[src]
            self.en_mids[src][ch] = self.en_mids_full[src]

    # create bud functions for converting MeV to KeV
    buds_mev_to_kev = [
        data.bud_function(
            self.sim_edep_mev.format(n),
            self.sim_edep_kev.format(n),
            lambda e:e*1000
        ) for n in self.ch_comp
    ]

    # create mask function for requiring that at least one scintillator
    # has energy deposited
    mask_any_activity = data.mask_any(*[
        data.cut(
            self.sim_edep_mev.format(n),
            0
        ) for n in self.ch_comp
    ])

    # load branches and create branch managers for experimental data
    self.bms_obs_src = {}
    for src,file in self.exp_files.items():
        branches = fileio.load_branches(file, self.branches_a)
        this_bm = data.BranchManager(branches)
        self.bms_obs_src[src] = this_bm
        # apply lyso veto
        this_bm.mask(data.cut(self.lyso_branch,hi=self.lyso_cut_hi),apply_mask=True)

    # load branches and create branch manager for experimental background spectra
    bg_branch_sets = []
    for file in self.bg_files:
        branches = fileio.load_branches(file, self.branches_a)
        bg_branch_sets.append(branches)
    # concatenate all background runs into a single set of branches
    combined_branches = {key:np.concatenate([_[key] for _ in bg_branch_sets]) for key in self.branches_a}
    self.bm_obs_bg = data.BranchManager(combined_branches)
    self.bm_obs_bg.mask(data.cut(self.lyso_branch,hi=self.lyso_cut_hi),apply_mask=True)

    # load branches and create branch managers for simulation data
    self.bms_sim_src = {}
    self.sim_e = {_:{} for _ in self.sim_sources}
    self.sim_bg = {}
    self.transformers = {_:{} for _ in self.sim_sources}
    for src in self.sim_sources:
        # load branches corresponding to energy deposits in each scint
        branches = fileio.load_branches(self.sim_files[src], self.branches_e)
        # create and store branch manager
        this_bm = data.BranchManager(branches)
        self.bms_sim_src[src] = this_bm
        # discard events with no energy deposit in any scintillator
        self.vp(1, "{}, n events, n events with any energy deposit".format(src))
        self.vp(1, this_bm[self.sim_edep_mev.format(self.ch_comp[0])].shape[0])
        this_bm.mask(mask_any_activity, apply_mask = True)
        self.vp(1, this_bm[self.sim_edep_mev.format(self.ch_comp[0])].shape[0])
        # make new branches to convert MeV to KeV for convenience
        this_bm.bud(buds_mev_to_kev)
        # # <TEMP>
        # # test finding unique energies
        # plt.subplots(nrows=1,ncols=len(self.ch_comp),sharex=True,sharey=True)
        # for ich,ch in enumerate(self.ch_comp):
        #     this_e = positive(this_bm[self.sim_edep_kev.format(ch)])
        #     print("")
        #     print("deposited energies in channel {}".format(ch))
        #     counts = {}
        #     for e in this_e:
        #         key = round(e, 1)
        #         counts[key] = counts.get(key,0)
iBuilding in [iPaganTemple, iSacrificialAltar]: iTemples = getNumBuildings(iPlayer, iPaganTemple) iAltars = getNumBuildings(iPlayer, iSacrificialAltar) if iTemples >= 6 and iAltars >= 6: win(iPlayer, 1) # first Mexican goal: build three cathedrals of your state religion by 1880 AD elif iCiv == iMexico: if isPossible(iPlayer, 0): iStateReligion = pPlayer.getStateReligion() if iStateReligion >= 0: iStateReligionCathedral = iCathedral + 4 * iStateReligion if iBuilding == iStateReligionCathedral: if getNumBuildings(iPlayer, iStateReligionCathedral) >= 3: win(iPlayer, 0) # first Mughal goal: build three Islamic Mosques by 1500 AD elif iCiv == iMughals: if isPossible(iPlayer, 0): if iBuilding == iIslamicCathedral: if getNumBuildings(iPlayer, iIslamicCathedral) >= 3: win(iPlayer, 0) # first Incan goal: build 5 tambos and a road along the Andean coast by 1500 AD elif iCiv == iInca: if isPossible(iPlayer, 0): if iBuilding == iTambo: if isRoad(iPlayer, lAndeanCoast) and getNumBuildings(iPlayer, iTambo) >= 5: win(iPlayer, 0) def checkWonderGoal(iPlayer, lWonders): return all(data.getWonderBuilder(iWonder) == iPlayer for iWonder in lWonders) @handler("religionFounded") def onReligionFounded(iReligion, iPlayer): if not game.isVictoryValid(7): return # handle all "be the first to found" goals if not isFounded(iReligion): data.lReligionFounder[iReligion] = iPlayer for iLoopCiv, (iGoal, lReligion) in dReligionGoals.items(): iLoopPlayer = slot(iLoopCiv) if not isPossible(iLoopPlayer, iGoal): continue if iReligion in lReligions: if iPlayer != iLoopPlayer: lose(iLoopPlayer, iGoal) elif checkReligionGoal(iLoopPlayer, lReligions): win(iLoopPlayer, iGoal) def checkReligionGoal(iPlayer, lReligions): return all(data.lReligionFounder[iReligion] == iPlayer for iReligion in lReligion) @handler("projectBuilt") def onProjectBuilt(iPlayer, iProject): if not game.isVictoryValid(7): return # second Russian goal: be the first civilization to complete the Manhattan Project and the Apollo Program 
iRussiaPlayer = slot(iRussia) if isPossible(iRussiaPlayer, 1): if iProject in [iLunarLanding, iManhattanProject]: if iPlayer == iRussiaPlayer: bApolloProgram = iProject == iLunarLanding or team(iRussiaPlayer).getProjectCount(iLunarLanding) > 0 bManhattanProject = iProject == iManhattanProject or team(iRussiaPlayer).getProjectCount(iManhattanProject) > 0 if bApolloProgram and bManhattanProject: win(iRussiaPlayer, 1) else: lose(iRussiaPlayer, 1) @handler("combatResult") def onCombatResult(pWinningUnit, pLosingUnit): iWinningPlayer = pWinningUnit.getOwner() iLosingPlayer = pLosingUnit.getOwner() iWinningCiv = civ(iWinningPlayer) if not player(iWinningPlayer).isHuman() and data.bIgnoreAI: return pLosingUnitInfo = infos.unit(pLosingUnit) iDomainSea = DomainTypes.DOMAIN_SEA # second English goal: control a total of 25 frigates and ships of the line and sink 50 ships in 1800 AD if iWinningCiv == iEngland: if isPossible(iWinningPlayer, 1): if pLosingUnitInfo.getDomainType() == iDomainSea: data.iEnglishSinks += 1 # third Korean goal: sink 20 enemy ships elif iWinningCiv == iKorea: if isPossible(iWinningPlayer, 2): if pLosingUnitInfo.getDomainType() == iDomainSea: data.iKoreanSinks += 1 if data.iKoreanSinks >= 20: win(iWinningPlayer, 2) @handler("greatPersonBorn") def onGreatPersonBorn(unit, iPlayer): iUnitType = base_unit(unit) pUnitInfo = infos.unit(iUnitType) iCiv = civ(iPlayer) if not isGreatPersonTypeBorn(iUnitType): data.lFirstGreatPeople[lGreatPeopleUnits.index(iUnitType)] = iPlayer # second Mexican goal: get three great generals by 1940 AD if iCiv == iMexico: if isPossible(iPlayer, 1): if pUnitInfo.getGreatPeoples(iSpecialistGreatGeneral): data.iMexicanGreatGenerals += 1 if data.iMexicanGreatGenerals >= 3: win(iPlayer, 1) @handler("unitPillage") def onUnitPillage(unit, iImprovement, iRoute, iPlayer, iGold): if iImprovement < 0 or iGold >= 1000: return iCiv = civ(iPlayer) # third Viking goal: acquire 3000 gold by pillaging, conquering cities and sinking ships by 1500 
AD if iCiv == iVikings: if isPossible(iPlayer, 2): data.iVikingGold += iGold # first Turkic goal: pillage 20 improvements by 900 AD elif iCiv == iTurks: if isPossible(iPlayer, 0): data.iTurkicPillages += 1 # third Moorish goal: acquire 3000 gold through piracy by 1650 AD elif iCiv == iMoors: if isPossible(iPlayer, 2) and unit.getUnitType() == iCorsair: data.iMoorishGold += iGold @handler("cityCaptureGold") def onCityCaptureGold(city, iPlayer, iGold): # third Viking goal: acquire 3000 gold by pillaging, conquering cities and sinking ships by 1500 AD if civ(iPlayer) == iVikings: if isPossible(iPlayer, 2): data.iVikingGold += iGold @handler("playerGoldTrade") def onPlayerGoldTrade(iFrom, iTo, iGold): # third Tamil goal: acquire 4000 gold by trade by 1200 AD if civ(iTo) == iTamils: if isPossible(iTo, 2): data.iTamilTradeGold += iGold * 100 @handler("playerSlaveTrade") def onPlayerSlaveTrade(iPlayer, iGold): # second Congolese goal: gain 1000 gold through slave trade by 1800 AD if civ(iPlayer) == iCongo: if isPossible(iPlayer, 1): data.iCongoSlaveCounter += iGold if data.iCongoSlaveCounter >= scale(1000): win(iPlayer, 1) @handler("tradeMission") def onTradeMission(iUnit, iPlayer, iX, iY, iGold): iCiv = civ(iPlayer) # third Tamil goal: acquire 4000 gold by trade by 1200 AD if iCiv == iTamils: data.iTamilTradeGold += iGold * 100 # first Mande goal: conduct a trade mission in your state religion's holy city by 1350 AD elif iCiv == iMali: if isPossible(iPlayer, 0): iStateReligion = player(iPlayer).getStateReligion() if iStateReligion != -1: pHolyCity = game.getHolyCity(iStateReligion) if location(pHolyCity) == (iX, iY): win(iPlayer, 0) @handler("peaceBrokered") def onPeaceBrokered(iBroker, iPlayer1, iPlayer2): iBrokerCiv = civ(iBroker) # third Canadian goal: end twelve wars through diplomacy by 2000 AD if iBrokerCiv == iCanada: if isPossible(iBroker, 2): data.iCanadianPeaceDeals += 1 if data.iCanadianPeaceDeals >= 12: win(iBroker, 2) @handler("blockade") def 
onBlockade(iPlayer, iGold): iCiv = civ(iPlayer) # third Moorish goal: acquire 3000 gold through piracy by 1650 AD if iCiv == iMoors: if isPossible(iPlayer, 2): data.iMoorishGold += iGold @handler("firstContact") def onFirstContact(iPlayer, iHasMetPlayer): # third Maya goal: make contact with a European civilization before they have discovered America iMayaPlayer = slot(iMaya) if isPossible(iMayaPlayer, 2): if iMayaPlayer in [iPlayer, iHasMetPlayer]: if iPlayer == iMayaPlayer and civ(iHasMetPlayer) in dCivGroups[iCivGroupEurope]: iEuropean = iHasMetPlayer elif iHasMetPlayer == iMayaPlayer and civ(iPlayer) in dCivGroups[iCivGroupEurope]: iEuropean = iPlayer else: return for plot in plots.start(tNorthAmerica[0]).end(tNorthAmerica[1][0]+2, tNorthAmerica[1][1]) + plots.start(tSouthCentralAmerica[0]).end(tSouthCentralAmerica[1][0]+2, tSouthCentralAmerica[1][1]): if plot.isRevealed(iEuropean, False) and not plot.isWater(): lose(iMayaPlayer, 2) return @handler("playerChangeStateReligion") def onPlayerChangeStateReligion(iPlayer, iStateReligion): iCiv = civ(iPlayer) # second Ethiopian goal: convert to Orthodoxy five turns after it is founded if iCiv == iEthiopia: if iStateReligion == iOrthodoxy: if game.isReligionFounded(iOrthodoxy): if turn() <= game.getReligionGameTurnFounded(iOrthodoxy) + turns(5): data.bEthiopiaConverted = True def checkReligiousGoals(iPlayer): return all(checkReligiousGoal(iPlayer, i) == 1 for i in range(3)) def checkReligiousGoal(iPlayer, iGoal): pPlayer = player(iPlayer) iVictoryType = getReligiousVictoryType(iPlayer) if iVictoryType == -1: return -1 elif iVictoryType == iJudaism: # first Jewish goal: have a total of 15 Great Prophets, Scientists and Statesmen in Jewish cities if iGoal == 0: iProphets = countReligionSpecialists(iJudaism, iSpecialistGreatProphet) iScientists = countReligionSpecialists(iJudaism, iSpecialistGreatScientist) iStatesmen = countReligionSpecialists(iJudaism, iSpecialistGreatStatesman) if iProphets + iScientists + iStatesmen 
>= 15:
                return 1
        # second Jewish goal: have legendary culture in the Jewish holy city
        elif iGoal == 1:
            pHolyCity = game.getHolyCity(iJudaism)
            if pHolyCity.getOwner() == iPlayer and pHolyCity.getCultureLevel() >= 6:
                return 1
        # third Jewish goal: have friendly relations with six civilizations with Jewish minorities
        elif iGoal == 2:
            iFriendlyRelations = countPlayersWithAttitudeAndReligion(iPlayer, AttitudeTypes.ATTITUDE_FRIENDLY, iJudaism)
            if iFriendlyRelations >= 6:
                return 1

    elif iVictoryType == iOrthodoxy:
        # first Orthodox goal: build four Orthodox cathedrals
        if iGoal == 0:
            if getNumBuildings(iPlayer, iOrthodoxCathedral) >= 4:
                return 1
        # second Orthodox goal: make sure the five most cultured cities in the world are Orthodox
        elif iGoal == 1:
            if countBestCitiesReligion(iOrthodoxy, cityCulture, 5) >= 5:
                return 1
        # third Orthodox goal: make sure there are no Catholic civilizations in the world
        elif iGoal == 2:
            if countReligionPlayers(iCatholicism)[0] == 0:
                return 1

    elif iVictoryType == iCatholicism:
        # first Catholic goal: be pope for 100 turns
        if iGoal == 0:
            if data.iPopeTurns >= turns(100):
                return 1
        # second Catholic goal: control the Catholic shrine and make sure 12 great prophets are settled in Catholic civilizations
        elif iGoal == 1:
            bShrine = getNumBuildings(iPlayer, iCatholicShrine) > 0
            iSaints = countReligionSpecialists(iCatholicism, iSpecialistGreatProphet)
            if bShrine and iSaints >= 12:
                return 1
        # third Catholic goal: make sure 50% of world territory is controlled by Catholic civilizations
        elif iGoal == 2:
            if getReligiousLand(iCatholicism) >= 50.0:
                return 1

    elif iVictoryType == iProtestantism:
        # first Protestant goal: be first to discover Civil Liberties, Constitution and Economics
        # NOTE(review): the comment says "Constitution" but the code uses
        # iSocialContract — presumably a renamed tech; confirm against the mod's tech list.
        if iGoal == 0:
            lProtestantTechs = [iCivilLiberties, iSocialContract, iEconomics]
            if checkTechGoal(iPlayer, lProtestantTechs):
                return 1
            # definitively failed once another player discovered any of the three first
            elif data.lFirstDiscovered[iCivilLiberties] not in [iPlayer, -1] or data.lFirstDiscovered[iSocialContract] not in [iPlayer, -1] or data.lFirstDiscovered[iEconomics] not in [iPlayer, -1]:
                return 0
        # second Protestant goal: make sure five great merchants and great engineers are settled in Protestant civilizations
        elif iGoal == 1:
            iEngineers = countReligionSpecialists(iProtestantism, iSpecialistGreatEngineer)
            iMerchants = countReligionSpecialists(iProtestantism, iSpecialistGreatMerchant)
            if iEngineers >= 5 and iMerchants >= 5:
                return 1
        # third Protestant goal: make sure at least half of all civilizations are Protestant or Secular
        elif iGoal == 2:
            iProtestantCivs, iTotal = countReligionPlayers(iProtestantism)
            iSecularCivs, iTotal = countCivicPlayers(iSecularism)
            if 2 * (iProtestantCivs + iSecularCivs) >= iTotal:
                return 1

    elif iVictoryType == iIslam:
        # first Muslim goal: spread Islam to 40%
        if iGoal == 0:
            fReligionPercent = game.calculateReligionPercent(iIslam)
            if fReligionPercent >= 40.0:
                return 1
        # second Muslim goal: settle seven great people in the Muslim holy city
        elif iGoal == 1:
            iCount = 0
            pHolyCity = game.getHolyCity(iIslam)
            for iGreatPerson in lGreatPeople:
                iCount += pHolyCity.getFreeSpecialistCount(iGreatPerson)
            if iCount >= 7:
                return 1
        # third Muslim goal: control five shrines
        elif iGoal == 2:
            iCount = 0
            # shrine building ids are laid out in blocks of 4 per religion
            for iReligion in range(iNumReligions):
                iCount += getNumBuildings(iPlayer, iShrine + 4*iReligion)
            if iCount >= 5:
                return 1

    elif iVictoryType == iHinduism:
        # first Hindu goal: settle five different great people in the Hindu holy city
        if iGoal == 0:
            iCount = 0
            pHolyCity = game.getHolyCity(iHinduism)
            for iGreatPerson in lGreatPeople:
                if pHolyCity.getFreeSpecialistCount(iGreatPerson) > 0:
                    iCount += 1
            if iCount >= 5:
                return 1
        # second Hindu goal: experience 24 turns of golden age
        elif iGoal == 1:
            if data.iHinduGoldenAgeTurns >= turns(24):
                return 1
        # third Hindu goal: make sure the five largest cities in the world are Hindu
        elif iGoal == 2:
            if countBestCitiesReligion(iHinduism, cityPopulation, 5) >= 5:
                return 1

    elif iVictoryType == iBuddhism:
        # first Buddhist goal: be at peace for 100 turns
        if iGoal == 0:
            if data.iBuddhistPeaceTurns >= turns(100):
                return 1
        # second Buddhist goal: have the highest approval rating for 100 turns
        elif iGoal == 1:
            if data.iBuddhistHappinessTurns >= turns(100):
                return 1
        # third Buddhist goal: have cautious or better relations with all civilizations in the world
        elif iGoal == 2:
            if countGoodRelationPlayers(iPlayer, AttitudeTypes.ATTITUDE_CAUTIOUS) >= countLivingPlayers()-1:
                return 1

    elif iVictoryType == iConfucianism:
        # first Confucian goal: have friendly relations with five civilizations
        if iGoal == 0:
            if countGoodRelationPlayers(iPlayer, AttitudeTypes.ATTITUDE_FRIENDLY) >= 5:
                return 1
        # second Confucian goal: have five wonders in the Confucian holy city
        elif iGoal == 1:
            pHolyCity = game.getHolyCity(iConfucianism)
            if countCityWonders(iPlayer, (pHolyCity.getX(), pHolyCity.getY()), True)
= self.get_guild(int(data["guild_id"])) if not guild: return member = Member(self, data, guild) guild._members[member.id] = member self.dispatch("guild_member_add", member) async def parse_guild_member_remove(self, data: Dict) -> None: """Parses the ``GUILD_MEMBER_REMOVE`` event. This method parses the raw data received from the ``GUILD_MEMBER_REMOVE`` event once received from the gateway. This method calls :meth:`.State.dispatch` with two payloads (:class:`.Guild` and :class:`.User`) Parameters ---------- data: :class:`dict` The raw data received from the gateway """ guild = self.get_guild(int(data["guild_id"])) if not guild: return user = self.create_user(data["user"]) guild._members.pop(user.id, None) self.dispatch("guild_member_remove", guild, user) async def parse_guild_member_update(self, data: Dict) -> None: """Parses the ``GUILD_MEMBER_UPDATE`` event. This method parses the raw data received from the ``GUILD_MEMBER_UPDATE`` event once received from the gateway. This method calls :meth:`.State.dispatch` with two payloads before (:class:`.Member`) and after (:class:`.Member`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ guild = self.get_guild(int(data["guild_id"])) if not guild: return member = guild.get_member(int(data["user"]["id"])) if member is None: return after = self.create_member(data, guild) guild._members[member.id] = after self.dispatch("guild_member_update", member, after) async def parse_guild_members_chunk(self, data: Dict) -> None: """Parses the ``GUILD_MEMBERS_CHUNK`` event. This method parses the raw data received from the ``GUILD_MEMBERS_CHUNK`` event once received from the gateway. 
This method calls :meth:`.State.dispatch` with one payload (:class:`.Chunk`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ chunk = Chunk(self, data) for member in chunk.members: chunk.guild._members[member.id] = member self.dispatch("guild_members_chunk", chunk) async def parse_guild_role_create(self, data: Dict) -> None: """Parses the ``GUILD_ROLE_CREATE`` event. This method parses the raw data received from the ``GUILD_ROLE_CREATE`` event once received from the gateway. This method calls :meth:`.State.dispatch` with one payload (:class:`.Role`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ guild = self.get_guild(int(data["guild_id"])) if not guild: return role = Role(self, data, guild) guild._roles[role.id] = role self.dispatch("guild_role_create", role) async def parse_guild_role_update(self, data: Dict) -> None: """Parses the ``GUILD_ROLE_UPDATE`` event. This method parses the raw data received from the ``GUILD_ROLE_UPDATE`` event once received from the gateway. This method calls :meth:`.State.dispatch` with two payloads before (:class:`.Role`) and after (:class:`.Role`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ guild = self.get_guild(int(data["guild_id"])) if not guild: return role = guild.get_role(int(data["role"]["id"])) if role is None: return before = role._copy() role._data = data["role"] self.dispatch("guild_role_update", before, role) async def parse_guild_role_delete(self, data: Dict) -> None: """Parses the ``GUILD_ROLE_DELETE`` event. This method parses the raw data received from the ``GUILD_ROLE_DELETE`` event once received from the gateway. 
This method calls :meth:`.State.dispatch` with one payload (:class:`.Role`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ guild = self.get_guild(int(data["guild_id"])) if not guild: return role_id = int(data["role_id"]) role = guild._roles.pop(role_id, None) if role is None: return self.dispatch("guild_role_delete", role) async def parse_integration_create(self, data: Dict) -> None: """Parses the `INTEGRATION_CREATE` event. This method parses the raw data received from the `INTEGRATION_CREATE` event once received from the gateway. This method calls :meth:`.State.dispatch` with one payload (:class:`.Integration`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ guild = self.get_guild(int(data["guild_id"])) if not guild: return integration = Integration(self, data, guild) self.dispatch("integration_create", integration) async def parse_integration_update(self, data: Dict) -> None: """Parses the `INTEGRATION_UPDATE` event. This method parses the raw data received from the `INTEGRATION_UPDATE` event once received from the gateway. This method calls :meth:`.State.dispatch` with one payload (:class:`.Integration`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ guild = self.get_guild(int(data["guild_id"])) if not guild: return integration = Integration(self, data, guild) self.dispatch("integration_update", integration) async def parse_integration_delete(self, data: Dict) -> None: """Parses the `INTEGRATION_DELETE` event. This method parses the raw data received from the `INTEGRATION_UPDATE` event once received from the gateway. 
This method calls :meth:`.State.dispatch` with one payload (:class:`.Integration`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ guild = self.get_guild(int(data["guild_id"])) if not guild: return integration = DeletedIntegration(self, data, guild) self.dispatch("integration_delete", integration) async def parse_invite_create(self, data: Dict) -> None: """Parses the `INVITE_CREATE` event. This method parses the raw data received from the `INVITE_CREATE` event once received from the gateway. This method calls :meth:`.State.dispatch` with one payload (:class:`.Invite`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ invite = Invite(self, data) self.dispatch("invite_create", invite) async def parse_invite_delete(self, data: Dict) -> None: """Parses the `INVITE_DELETE` event. This method parses the raw data received from the `INVITE_DELETE` event once received from the gateway. This method calls :meth:`.State.dispatch` with one payload (:class:`.DeletedInvite`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ invite = DeletedInvite(self, data) self.dispatch("invite_delete", invite) async def parse_message_delete_bulk(self, data: Dict) -> None: """Parses the `MESSAGE_DELETE_BULK` event. This method parses the raw data received from the `MESSAGE_DELETE_BULK` event once received from the gateway. 
This method calls :meth:`.State.dispatch` with one payload (a list of :class:`.Message` or :class:`.DeletedMessage`) Parameters ----------- data: :class:`dict` The raw data received from the gateway """ messages = [] for message_id in data["ids"]: message = self._messages.pop(message_id, None) if not message: d = data.copy() d["id"] = message_id message = DeletedMessage(d) # type: ignore messages.append(message) self.dispatch("message_delete_bulk", messages) def get_message(self, message_id: int) -> Optional[Message]: """Grabs a :class:`.Message` from the internal cache. Parameters ---------- message_id: :class:`int` The id of the message to get Returns ------- Optional[:class:`.Message`] The message if cached. """ return self._messages.get(message_id) def get_user(self, user_id: int) -> Optional[User]: """Grabs a :class:`.User` from the internal cache. Parameters ---------- user_id: :class:`int` The id of the user to get Returns ------- Optional[:class:`.User`] The user if cached. """ return self._users.get(user_id) def add_user(self, data: dict) -> User: """Creates a user then caches it. This method creates a new :class:`.User` object then caches it into :attr:`.State.users` Parameters ---------- data: :class:`dict` The data to produce a user with Returns ------- :class:`.User` The newly created user. """ user = User(self, data) self._users[user.id] = user return user def get_guild(self, guild_id: int) -> Optional[Guild]: """Grabs a :class:`.Guild` from the internal cache. Parameters ---------- guild_id: :class:`int` The id of the guild to get Returns ------- Optional[:class:`.Guild`] The guild if cached. """ return self._guilds.get(guild_id) def get_channel( self, channel_id: int ) -> Optional[Union[TextChannel, DMChannel, VoiceChannel, CategoryChannel, Channel]]: """Grabs a :class:`.Channel` from the internal cache. 
Parameters ---------- channel_id: :class:`int` The id of the channel to get Returns ------- Optional[Union[:class:`.Channel`, :class:`.DMChannel`]] The channel if cached. """ return self._channels.get(channel_id) def get_emoji(self, emoji_id: int) -> Optional[Emoji]: """Grabs an :class:`.Emoji` from the internal cache. Parameters ---------- emoji_id: :class:`int` The id of the emoji to get Returns ------- :class:`.Emoji` The emoji if cached. """ return self._emojis.get(emoji_id) def get_sticker(self, sticker_id: int) -> Optional[Sticker]: """ Gets a sticker from the cache. Parameters ---------- sticker_id: :class:`int` The ID of the sticker. Returns ------- Optional[:class:`.Sticker`] The sticker if it exists in the cache, otherwise ``None``. """ return self._stickers.get(sticker_id) def create_message(self, data: dict, channel: Any) -> Message: """Creates a :class:`.Message` instance. If you're wondering why this is here. Its to use as a syntactic sugar when sending messages. This is used in :meth:`.TextChannel.send` Parameters ---------- dict: :class:`dict` The raw data of the message channel: Any The channel of the message Returns ------- :class:`.Message` The created message instance. """ return Message(self, data, channel) # type: ignore def create_channel(self, data: dict, *args: Any) -> Union[TextChannel, VoiceChannel, CategoryChannel, Channel]: """Creates a :class:`.Channel` instance. If you're wondering why this is here. Its to use as a syntatic sugar for creating channels. Parameters ---------- data: :class:`dict` The raw data of the channel *args: Any Extra options to pass to the channel's constructor Returns ------- :class:`.Channel` The created channel instance. """ cls = self.CHANNEL_MAPPING.get(int(data["type"]), Channel) channel = cls(self, data, *args) self.create_overwrites(channel) # type: ignore return channel # type: ignore def create_guild_channels(self, guild: Guild, data: dict) -> Guild: """Creates the channels of a guild. 
Parameters ---------- guild: :class:`.Guild` The guild to create the channels for data: :class:`dict` The raw data of the guild Returns ------- :class:`.Guild` The guild which was passed in. """ if "channels" not in data: return guild channels = {int(payload["id"]): self.create_channel(payload, guild) for payload in data["channels"]} for id, channel in channels.items(): self._channels[id] = channel guild._channels = channels return guild def create_guild_members(self, guild: Guild,
from tython.lex import TokenType from tython.errors import SyntaxError from tython.types import Types from .parse_result import ParseResult from .nodes import * ############################################ # PARSER ############################################ class Parser: def __init__(self, tokens): self.tokens = tokens self.tok_idx = -1 self.var_type = None self.advance() def advance(self): self.tok_idx += 1 self.update_current_tok() return self.current_tok def reverse(self, amount=1): self.tok_idx -= amount self.update_current_tok() return self.current_tok def update_current_tok(self): if self.tok_idx < len(self.tokens): self.current_tok = self.tokens[self.tok_idx] def parse(self): res = self.statements() if not res.error and self.current_tok.type != TokenType.EOF: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected '+', '-', '*', '/', or '^'", ) ) return res ############################################ def statements(self): """ : NEWLINE* statement (NEWLINE+ statement)* NEWLINE* """ res = ParseResult() statements = [] pos_start = self.current_tok.pos_start.copy() while self.current_tok.type == TokenType.NEWLINE: res.register_advancement() self.advance() statement = res.register(self.statement()) if res.error: return res statements.append(statement) more_statements = True while True: newline_count = 0 while self.current_tok.type == TokenType.NEWLINE: res.register_advancement() self.advance() newline_count += 1 if newline_count == 0: more_statements = False if not more_statements: break statement = res.try_register(self.statement()) if not statement: self.reverse(res.to_reverse_count) more_statements = False continue statements.append(statement) return res.success( ListNode(statements, pos_start, self.current_tok.pos_end.copy()) ) def statement(self): """ : KEYWORD:return expr? : KEYWORD:continue : KEYWORD:break : expr """ res = ParseResult() pos_start = self.current_tok.pos_start.copy() # KEYWORD:return expr? 
if self.current_tok.matches(TokenType.KEYWORD, "return"): res.register_advancement() self.advance() expr = res.try_register(self.expr()) if not expr: self.reverse(res.to_reverse_count) return res.success( ReturnNode(expr, pos_start, self.current_tok.pos_start.copy()) ) # KEYWORD:continue if self.current_tok.matches(TokenType.KEYWORD, "continue"): res.register_advancement() self.advance() return res.success( ContinueNode(pos_start, self.current_tok.pos_start.copy()) ) # KEYWORD:break if self.current_tok.matches(TokenType.KEYWORD, "break"): res.register_advancement() self.advance() return res.success(BreakNode(pos_start, self.current_tok.pos_start.copy())) expr = res.register(self.expr()) if res.error: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_start.copy(), "Expected expression, 'return', 'continue', or 'break'", ) ) return res.success(expr) def expr(self): """ : TYPE:var|int|float|str|num IDENTIFIER EQ expr : comp-expr ((KEYWORD:AND|KEYWORD:OR) comp-expr)* """ res = ParseResult() if self.current_tok.type == TokenType.TYPE: type = self.current_tok res.register_advancement() self.advance() if self.current_tok.type != TokenType.IDENTIFIER: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected identifier", ) ) var_name = self.current_tok res.register_advancement() self.advance() if self.current_tok.type != TokenType.EQ: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected '='", ) ) if type.matches(TokenType.TYPE, Types.Number.value): self.var_type = Types.Number elif type.matches(TokenType.TYPE, Types.Int.value): self.var_type = Types.Int elif type.matches(TokenType.TYPE, Types.Float.value): self.var_type = Types.Float elif type.matches(TokenType.TYPE, Types.String.value): self.var_type = Types.String else: self.var_type = Types.Any res.register_advancement() self.advance() expr = res.register(self.expr()) if res.error: return res return 
res.success(VarAssignNode(var_name, expr, self.var_type)) node = res.register( self.bin_op( self.comp_expr, ((TokenType.KEYWORD, "and"), (TokenType.KEYWORD, "or")) ) ) if res.error: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected expression", ) ) return res.success(node) def comp_expr(self): """ : NOT comp-expr : arith-expr ((EE|LT|GT|LTE|GTE) arith-expr)* """ res = ParseResult() if self.current_tok.matches(TokenType.KEYWORD, "not"): op_tok = self.current_tok res.register_advancement() self.advance() node = res.register(self.comp_expr()) if res.error: return res return res.success(UnaryOpNode(op_tok, node)) node = res.register( self.bin_op( self.arith_expr, ( TokenType.EE, TokenType.NE, TokenType.LT, TokenType.GT, TokenType.LTE, TokenType.GTE, ), ) ) if res.error: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected int, float, identifier, '+', '-', '(', or 'not'", ) ) return res.success(node) def arith_expr(self): """ term ((PLUS|MINUS) term)* """ return self.bin_op(self.term, (TokenType.PLUS, TokenType.MINUS)) def term(self): """ : factor ((MUL | DIV) factor)* """ return self.bin_op(self.factor, (TokenType.MUL, TokenType.DIV)) def factor(self): """ : (PLUS|MINUS) factor : power """ res = ParseResult() tok = self.current_tok if tok.type in (TokenType.PLUS, TokenType.MINUS): res.register_advancement() self.advance() factor = res.register(self.factor()) if res.error: return res return res.success(UnaryOpNode(tok, factor)) return self.power() def power(self): """ : call (POWER factor)* """ return self.bin_op(self.call, (TokenType.POWER,), self.factor) def call(self): """ : atom (LPAREN (expr (COMMA expr)*)? RPAREN)? 
""" res = ParseResult() atom = res.register(self.atom()) if res.error: return res if self.current_tok.type == TokenType.LPAREN: res.register_advancement() self.advance() arg_nodes = [] if self.current_tok.type == TokenType.RPAREN: res.register_advancement() self.advance() else: arg_nodes.append(res.register(self.expr())) if res.error: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected expression", ) ) while self.current_tok.type == TokenType.COMMA: res.register_advancement() self.advance() arg_nodes.append(res.register(self.expr())) if res.error: return res if self.current_tok.type != TokenType.RPAREN: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, f"Expected ',' or ')'", ) ) res.register_advancement() self.advance() return res.success(CallNode(atom, arg_nodes)) return res.success(atom) def atom(self): """ : INT|FLOAT|NUMBER|STRING|IDENTIFIER : LPAREN expr RPAREN : list-expr : if-expr : for-expr : while-expr : func-def """ res = ParseResult() tok = self.current_tok if tok.type == TokenType.INT: res.register_advancement() self.advance() return res.success(IntNode(tok, self.var_type)) elif tok.type == TokenType.FLOAT: res.register_advancement() self.advance() return res.success(FloatNode(tok, self.var_type)) elif tok.type == TokenType.STRING: res.register_advancement() self.advance() return res.success(StringNode(tok, self.var_type)) elif tok.type == TokenType.IDENTIFIER: res.register_advancement() self.advance() if self.current_tok.type == TokenType.DOT: res.register_advancement() self.advance() if self.current_tok.type == TokenType.METHOD: print("method") else: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected method", ) ) return res.success(VarAccessNode(tok)) elif tok.type == TokenType.LPAREN: res.register_advancement() self.advance() expr = res.register(self.expr()) if res.error: return res if self.current_tok.type == 
TokenType.RPAREN: res.register_advancement() self.advance() return res.success(expr) else: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected ')'", ) ) elif tok.type == TokenType.LSQUARE: list_expr = res.register(self.list_expr()) if res.error: return res return res.success(list_expr) elif tok.matches(TokenType.KEYWORD, "if"): if_expr = res.register(self.if_expr()) if res.error: return res return res.success(if_expr) elif tok.matches(TokenType.KEYWORD, "for"): for_expr = res.register(self.for_expr()) if res.error: return res return res.success(for_expr) elif tok.matches(TokenType.KEYWORD, "while"): while_expr = res.register(self.while_expr()) if res.error: return res return res.success(while_expr) elif tok.matches(TokenType.KEYWORD, "def"): func_def = res.register(self.func_def()) if res.error: return res return res.success(func_def) return res.failure( SyntaxError( tok.pos_start, tok.pos_end, "Expected int, float, identifier, '+', '-' or '('", ) ) def list_expr(self): """ : LSQUARE (expr (COMMA expr)*)? 
RSQUARE """ res = ParseResult() element_nodes = [] pos_start = self.current_tok.pos_start.copy() if self.current_tok.type != TokenType.LSQUARE: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected '['" ) ) res.register_advancement() self.advance() if self.current_tok.type == TokenType.RSQUARE: res.register_advancement() self.advance() else: element_nodes.append(res.register(self.expr())) if res.error: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected expression or ']'", ) ) while self.current_tok.type == TokenType.COMMA: res.register_advancement() self.advance() element_nodes.append(res.register(self.expr())) if res.error: return res if self.current_tok.type != TokenType.RSQUARE: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, f"Expected ',' or ']'", ) ) res.register_advancement() self.advance() return res.success( ListNode(element_nodes, pos_start, self.current_tok.pos_end.copy()) ) def if_expr(self): """ : KEYWORD:if expr KEYWORD:: (statement if-expr-b|if-expr-c?) | (NEWLINE statements KEYWORD:stop|if-expr-b|if-expr-c) """ res = ParseResult() all_cases = res.register(self.if_expr_cases("if")) if res.error: return res cases, else_case = all_cases # type: ignore return res.success(IfNode(cases, else_case)) def if_expr_b(self): """ : KEYWORD:elif expr KEYWORD:: (statement if-expr-b|if-expr-c?) 
| (NEWLINE statements KEYWORD:stop|if-expr-b|if-expr-c) """ return self.if_expr_cases("elif") def if_expr_c(self): """ : KEYWORD:else statement | (NEWLINE statements KEYWORD:stop) """ res = ParseResult() else_case = None if self.current_tok.matches(TokenType.KEYWORD, "else"): res.register_advancement() self.advance() if self.current_tok.type == TokenType.NEWLINE: res.register_advancement() self.advance() statements = res.register(self.statements()) if res.error: return res else_case = (statements, True) if self.current_tok.matches(TokenType.KEYWORD, "stop"): res.register_advancement() self.advance() else: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, "Expected 'stop'", ) ) else: expr = res.register(self.expr()) if res.error: return res else_case = (expr, False) return res.success(else_case) def if_expr_b_or_c(self): res = ParseResult() cases, else_case = [], None if self.current_tok.matches(TokenType.KEYWORD, "elif"): all_cases = res.register(self.if_expr_b()) if res.error: return res cases, else_case = all_cases else: else_case = res.register(self.if_expr_c()) if res.error: return res return res.success((cases, else_case)) def if_expr_cases(self, case_keyword): res = ParseResult() cases = [] else_case = None if not self.current_tok.matches(TokenType.KEYWORD, case_keyword): return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, f"Expected '{case_keyword}'", ) ) res.register_advancement() self.advance() condition = res.register(self.expr()) if res.error: return res if not self.current_tok.matches(TokenType.KEYWORD, ":"): return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, f"Expected ':'", ) ) res.register_advancement() self.advance() if self.current_tok.type == TokenType.NEWLINE: res.register_advancement() self.advance() statements = res.register(self.statements()) if res.error: return res cases.append((condition, statements, True)) if 
self.current_tok.matches(TokenType.KEYWORD, "stop"): res.register_advancement() self.advance() else: all_cases = res.register(self.if_expr_b_or_c()) if res.error: return res new_cases, else_case = all_cases # type: ignore cases.extend(new_cases) else: expr = res.register(self.statement()) if res.error: return res cases.append((condition, expr, False)) all_cases = res.register(self.if_expr_b_or_c()) if res.error: return res new_cases, else_case = all_cases # type: ignore cases.extend(new_cases) return res.success((cases, else_case)) def for_expr(self): """ : KEYWORD:FOR IDENTIFIER EQ expr KEYWORD:TO expr (KEYWORD:STEP expr)? KEYWORD:: statement | (NEWLINE statements KEYWORD:stop) """ res = ParseResult() if not self.current_tok.matches(TokenType.KEYWORD, "for"): return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, f"Expected 'for'", ) ) res.register_advancement() self.advance() if self.current_tok.type != TokenType.IDENTIFIER: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, f"Expected identifier", ) ) var_name = self.current_tok res.register_advancement() self.advance() if self.current_tok.type != TokenType.EQ: return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, f"Expected '='", ) ) res.register_advancement() self.advance() start_value = res.register(self.expr()) if res.error: return res if not self.current_tok.matches(TokenType.KEYWORD, "to"): return res.failure( SyntaxError( self.current_tok.pos_start, self.current_tok.pos_end, f"Expected 'to'", ) ) res.register_advancement() self.advance() end_value = res.register(self.expr()) if res.error: return res if self.current_tok.matches(TokenType.KEYWORD, "step"): res.register_advancement() self.advance() step_value = res.register(self.expr()) if res.error: return res else: step_value = None if not self.current_tok.matches(TokenType.KEYWORD, ":"): return res.failure( SyntaxError( self.current_tok.pos_start, 
self.current_tok.pos_end, f"Expected ':'", ) ) res.register_advancement() self.advance() if self.current_tok.type == TokenType.NEWLINE: res.register_advancement() self.advance() body = res.register(self.statements()) if res.error: return res if not self.current_tok.matches(TokenType.KEYWORD,
if value.get('assay_term_name') not in controlRequiredAssayList: return # single cell RNA-seq in E4 do not require controls (ticket WOLD-6) if value.get('assay_term_name') == 'single cell isolation followed by RNA-seq' and \ check_award_condition(value, ["ENCODE4"]): return # We do not want controls if value.get('control_type'): return audit_level = 'ERROR' if value.get('assay_term_name') in ['CAGE', 'RAMPAGE'] or \ check_award_condition(value, ["ENCODE2", "Roadmap", "modENCODE", "ENCODE2-Mouse"]): audit_level = 'NOT_COMPLIANT' if value['possible_controls'] == []: detail = ('possible_controls is a list of experiment(s) that can ' 'serve as analytical controls for a given experiment. ' '{} experiments require a value in possible_controls. ' 'This experiment should be associated with at least one control ' 'experiment, but has no specified values in the possible_controls list.'.format( value['assay_term_name'] ) ) yield AuditFailure('missing possible_controls', detail, level=audit_level) return for control in value['possible_controls']: if not is_matching_biosample_control( control, value.get('biosample_ontology', {}).get('term_id')): detail = ('The specified control {} ' 'for this experiment is on {}, ' 'but this experiment is done on {}.'.format( audit_link(path_to_text(control['@id']), control['@id']), control.get('biosample_ontology', {}).get('term_name'), value['biosample_ontology']['term_name'] ) ) yield AuditFailure('inconsistent control', detail, level='ERROR') return def is_matching_biosample_control(dataset, biosample_term_id): if dataset['@type'][0] == 'Experiment': return dataset.get('biosample_ontology', {}).get('term_id') == biosample_term_id elif (not dataset.get('biosample_ontology') or any([term['term_id'] != biosample_term_id for term in dataset.get('biosample_ontology')])): return False return True def audit_experiment_platforms_mismatches(value, system, files_structure): if value['status'] in ['deleted', 'replaced']: return # do not apply the 
audit to DNase-seq and genetic modification followed by DNase-seq if value.get("assay_term_id") in ["OBI:0001853", "NTR:0004774"]: return if not files_structure.get('original_files'): return platforms = get_platforms_used_in_experiment(files_structure) if len(platforms) > 1: platforms_string = str(list(platforms)).replace('\'', '') detail = ('This experiment ' 'contains data produced on incompatible ' 'platforms {}.'.format(platforms_string)) yield AuditFailure('inconsistent platforms', detail, level='WARNING') elif len(platforms) == 1: platform_term_name = list(platforms)[0] if 'possible_controls' in value and \ value['possible_controls'] != []: for control in value['possible_controls']: if control.get('original_files'): control_platforms = get_platforms_used_in_experiment( create_files_mapping(control.get('original_files'), files_structure.get('excluded_types'))) if len(control_platforms) > 1: control_platforms_string = str( list(control_platforms)).replace('\'', '') detail = ('possible_controls is a list of experiment(s) that can serve ' 'as analytical controls for a given experiment. ' 'Experiment {} found in possible_controls list of this experiment ' 'contains data produced on platform(s) {} ' 'which are not compatible with platform {} ' 'used in this experiment.'.format( audit_link(path_to_text(control['@id']), control['@id']), control_platforms_string, platform_term_name ) ) yield AuditFailure('inconsistent platforms', detail, level='WARNING') elif len(control_platforms) == 1 and \ list(control_platforms)[0] != platform_term_name: detail = ('possible_controls is a list of experiment(s) that can serve ' 'as analytical controls for a given experiment. 
' 'Experiment {} found in possible_controls list of this experiment ' 'contains data produced on platform {} ' 'which is not compatible with platform {} ' 'used in this experiment.'.format( audit_link(path_to_text(control['@id']), control['@id']), list(control_platforms)[0], platform_term_name ) ) yield AuditFailure('inconsistent platforms', detail, level='WARNING') return def audit_experiment_ChIP_control(value, system, files_structure): if not check_award_condition(value, [ 'ENCODE3', 'ENCODE4', 'Roadmap']): return if value['status'] in ['deleted', 'replaced', 'revoked']: return # Currently controls are only be required for ChIP-seq if value.get('assay_term_name') != 'ChIP-seq': return # We do not want controls if value.get('control_type'): return if not value['possible_controls']: return num_IgG_controls = 0 for control_dataset in value['possible_controls']: if not is_control_dataset(control_dataset): detail = ( 'Experiment {} is ChIP-seq but its control {} does not ' 'have a valid "control_type".'.format( audit_link(path_to_text(value['@id']), value['@id']), audit_link(path_to_text(control_dataset['@id']), control_dataset['@id']) ) ) yield AuditFailure('invalid possible_control', detail, level='ERROR') return if not control_dataset.get('replicates'): continue if 'antibody' in control_dataset.get('replicates')[0]: num_IgG_controls += 1 # If all of the possible_control experiments are mock IP control experiments if num_IgG_controls == len(value['possible_controls']): if value.get('assay_term_name') == 'ChIP-seq': # The binding group agreed that ChIP-seqs all should have an input control. detail = ('Experiment {} is ChIP-seq and requires at least one input control,' ' as agreed upon by the binding group. 
Experiment {} is not an input control'.format( audit_link(path_to_text(value['@id']), value['@id']), audit_link(path_to_text(control_dataset['@id']), control_dataset['@id']) ) ) yield AuditFailure('missing input control', detail, level='NOT_COMPLIANT') return def is_control_dataset(dataset): return bool(dataset.get('control_type')) def audit_experiment_spikeins(value, system, excluded_types): if not check_award_condition(value, [ "ENCODE3", "ENCODE4", "modERN", "ENCODE", "ENCODE2-Mouse", "Roadmap"]): return ''' All ENCODE 3 long (>200) RNA-seq experiments should specify their spikeins. The spikeins specified should have datasets of type spikeins. The spikeins datasets should have a fasta file, a document, and maybe a tsv ''' if value['status'] in ['deleted', 'replaced']: return if value.get('assay_term_name') != 'RNA-seq': return for rep in value['replicates']: lib = rep.get('library') if lib is None: continue size_range = lib.get('size_range') if size_range != '>200': continue spikes = lib.get('spikeins_used') if (spikes is None) or (spikes == []): detail = ('Library {} is in ' 'an RNA-seq experiment and has size_range >200. 
' 'It requires a value for spikeins_used'.format( audit_link(path_to_text(lib['@id']), lib['@id']) ) ) yield AuditFailure('missing spikeins', detail, level='NOT_COMPLIANT') # Informattional if ENCODE2 and release error if ENCODE3 return def audit_experiment_biosample_term(value, system, excluded_types): if value['status'] in ['deleted', 'replaced']: return if value.get('biosample_ontology', {}).get('classification') == 'cell-free sample': return ontology = system['registry']['ontology'] term_id = value.get('biosample_ontology', {}).get('term_id') term_type = value.get('biosample_ontology', {}).get('classification') term_name = value.get('biosample_ontology', {}).get('term_name') if 'biosample_ontology' not in value: detail = ('Biosample {} is missing biosample_ontology'.format( audit_link(path_to_text(value['@id']), value['@id'])) ) yield AuditFailure('missing biosample_ontology', detail, level='ERROR') # The type and term name should be put into dependencies if term_id.startswith('NTR:'): detail = ('Experiment {} has an NTR biosample {} - {}'.format( audit_link(path_to_text(value['@id']), value['@id']), term_id, term_name) ) yield AuditFailure('NTR biosample', detail, level='INTERNAL_ACTION') else: if term_id not in ontology: detail = ('Experiment {} has term_id {} which is not in ontology'.format( audit_link(path_to_text(value['@id']), value['@id']), term_id) ) yield AuditFailure('term_id not in ontology', term_id, level='INTERNAL_ACTION') else: ontology_name = ontology[term_id]['name'] if ontology_name != term_name and term_name not in ontology[term_id]['synonyms']: detail = ('Experiment {} has a mismatch between biosample term_id ({}) ' 'and term_name ({}), ontology term_name for term_id {} ' 'is {}.'.format( audit_link(path_to_text(value['@id']), value['@id']), term_id, term_name, term_id, ontology_name ) ) yield AuditFailure('inconsistent ontology term', detail, level='ERROR') if 'replicates' in value: for rep in value['replicates']: if 'library' not in rep: 
continue lib = rep['library'] if 'biosample' not in lib: detail = ('Library {} is missing biosample, expecting one of type {}'.format( audit_link(path_to_text(lib['@id']), lib['@id']), term_name) ) yield AuditFailure('missing biosample', detail, level='ERROR') continue biosample = lib['biosample'] bs_type = biosample.get('biosample_ontology', {}).get('@id') bs_name = biosample.get('biosample_ontology', {}).get('name') experiment_bs_type = value.get('biosample_ontology', {}).get('@id') experiment_bs_name = value.get('biosample_ontology', {}).get('name') if bs_type != experiment_bs_type: detail = ("Experiment {} contains a library {} linked to biosample " "type '{}', while experiment's biosample type is '{}'.".format( audit_link(path_to_text(value['@id']), value['@id']), audit_link(path_to_text(lib['@id']), lib['@id']), audit_link(path_to_text(bs_type), bs_type), audit_link(path_to_text(experiment_bs_type), experiment_bs_type) ) ) yield AuditFailure('inconsistent library biosample', detail, level='ERROR') return def audit_experiment_antibody_characterized(value, system, excluded_types): '''Check that biosample in the experiment has been characterized for the given antibody.''' if not check_award_condition(value, [ 'ENCODE4', 'ENCODE3', 'modERN']): return if value['status'] in ['deleted']: return if value.get('assay_term_name') not in targetBasedAssayList: return target = value.get('target') if not target: return if value.get('control_type'): return if value['assay_term_name'] in ['RNA Bind-n-Seq', 'shRNA knockdown followed by RNA-seq', 'siRNA knockdown followed by RNA-seq', 'CRISPRi followed by RNA-seq']: return for rep in value['replicates']: antibody = rep.get('antibody') lib = rep.get('library') if not antibody or not lib: continue biosample = lib.get('biosample') if not biosample: continue organism = biosample.get('organism') antibody_targets = antibody.get('targets', []) ab_targets_investigated_as = set() sample_match = False for t in antibody_targets: for i in 
t['investigated_as']: ab_targets_investigated_as.add(i) characterized = False # ENCODE4 tagged antibodies are characterized differently (ENCD-4608) ab_award = system.get('request').embed( antibody['award'], '@@object?skip_calculated=true' )['rfa'] if ( ab_award == 'ENCODE4' and ( 'tag' in ab_targets_investigated_as or 'synthetic tag' in ab_targets_investigated_as ) ): characterized = bool(antibody['used_by_biosample_characterizations']) else: characterized = bool(antibody['characterizations']) if not characterized: detail = ('Antibody {} has not yet been characterized in any cell type or tissue in {}.'.format( audit_link(path_to_text(antibody['@id']), antibody['@id']), path_to_text(organism) ) ) yield AuditFailure('uncharacterized antibody', detail, level='NOT_COMPLIANT') return # We only want the audit raised if the organism in lot reviews matches that of the biosample # and if has not been characterized to standards. Otherwise, it doesn't apply and we # shouldn't raise a stink if 'histone' in ab_targets_investigated_as: for lot_review in antibody['lot_reviews']: if lot_review['organisms'] and organism == lot_review['organisms'][0]: sample_match = True if lot_review['status'] == 'characterized to standards with exemption': detail = ('Antibody {} has been characterized ' 'to the standard with exemption for {}'.format( audit_link(path_to_text(antibody['@id']), antibody['@id']), path_to_text(organism) ) ) yield AuditFailure('antibody characterized with exemption', detail, level='WARNING') elif lot_review['status'] == 'awaiting characterization': detail = ('Antibody {} has not yet been characterized in
<filename>google/cloud/iam_credentials_v1/services/iam_credentials/client.py # -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.iam_credentials_v1.types import common from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import IAMCredentialsTransport from .transports.grpc import IAMCredentialsGrpcTransport from .transports.grpc_asyncio import IAMCredentialsGrpcAsyncIOTransport class IAMCredentialsClientMeta(type): """Metaclass for the IAMCredentials client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. 
""" _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[IAMCredentialsTransport]] _transport_registry["grpc"] = IAMCredentialsGrpcTransport _transport_registry["grpc_asyncio"] = IAMCredentialsGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[IAMCredentialsTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class IAMCredentialsClient(metaclass=IAMCredentialsClientMeta): """A service account is a special type of Google account that belongs to your application or a virtual machine (VM), instead of to an individual end user. Your application assumes the identity of the service account to call Google APIs, so that the users aren't directly involved. Service account credentials are used to temporarily assume the identity of the service account. Supported credential types include OAuth 2.0 access tokens, OpenID Connect ID tokens, self- signed JSON Web Tokens (JWTs), and more. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "iamcredentials.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, IAMCredentialsTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the iam credentials client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.IAMCredentialsTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, IAMCredentialsTransport): # transport is a IAMCredentialsTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." 
) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def generate_access_token( self, request: common.GenerateAccessTokenRequest = None, *, name: str = None, delegates: Sequence[str] = None, scope: Sequence[str] = None, lifetime: duration.Duration = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> common.GenerateAccessTokenResponse: r"""Generates an OAuth 2.0 access token for a service account. Args: request (:class:`~.common.GenerateAccessTokenRequest`): The request object. name (:class:`str`): Required. The resource name of the service account for which the credentials are requested, in the following format: ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. The ``-`` wildcard character is required; replacing it with a project ID is invalid. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. delegates (:class:`Sequence[str]`): The sequence of service accounts in a delegation chain. Each service account must be granted the ``roles/iam.serviceAccountTokenCreator`` role on its next service account in the chain. The last service account in the chain must be granted the ``roles/iam.serviceAccountTokenCreator`` role on the service account that is specified in the ``name`` field of the request. The delegates must have the following format: ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``. 
The ``-`` wildcard character is required; replacing it with a project ID is invalid. This corresponds to the ``delegates`` field on the ``request`` instance; if ``request`` is provided, this should not be set. scope (:class:`Sequence[str]`): Required. Code to identify the scopes to be included in the OAuth 2.0 access token. See https://developers.google.com/identity/protocols/googlescopes for more information. At least one value required. This corresponds to the ``scope`` field on the ``request`` instance; if ``request`` is provided, this should not be set. lifetime (:class:`~.duration.Duration`): The desired lifetime duration of the access token in seconds. Must be set to a value less than or equal to 3600 (1 hour). If a value is not specified, the token's lifetime will be set to a default value of one hour. This corresponds to the ``lifetime`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.common.GenerateAccessTokenResponse: """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([name, delegates, scope, lifetime]): raise ValueError( "If the `request` argument is set, then
# NOTE(review): this chunk begins mid-method — the statements below are the
# tail of the preceding rmw_zpX word-sized test case whose "def" line lies
# outside this view. (... rmw byte w/o wrap-around)
        mpu.pc = 0x200
        mpu.a[0] = 0x7F
        zp = 0x7F
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.wordMask
        self.rmwVal = 0x55AA
        # Expected result: the RMW operation negates the operand (masked).
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0xFE] = mpu.byteMask & self.rmwVal
        mpu.memory[0xFF] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        tmp1 = mpu.byteMask & mpu.memory[0xFE]
        tmp2 = mpu.byteMask & mpu.memory[0xFF]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index < 512, index + unsigned offset rmw byte w/ wrap-around
        mpu.pc = 0x200
        mpu.a[0] = 0x80
        zp = 0x7F
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.wordMask
        self.rmwVal = 0x6633
        self.rtnVal = self.mask & (-self.rmwVal)
        # Word operand wraps from the top of the zero page to 0x00.
        mpu.memory[0xFF] = mpu.byteMask & self.rmwVal
        mpu.memory[0x00] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        tmp1 = mpu.byteMask & mpu.memory[0xFF]
        tmp2 = mpu.byteMask & mpu.memory[0x00]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index > 511, index + signed offset rmw byte, no wrap-around
        mpu.pc = 0x200
        mpu.a[0] = 0x281
        zp = 0x80
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.wordMask
        self.rmwVal = 0xAA55
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x201] = mpu.byteMask & self.rmwVal
        mpu.memory[0x202] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        tmp1 = mpu.byteMask & mpu.memory[0x201]
        tmp2 = mpu.byteMask & mpu.memory[0x202]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

    # rmw_zpX (flags: {osx, ind, siz} = {0, 1, 0})
    def test_rmw_zpX_indirect_byte(self):
        """Exercise rmw_zpX with indirect, byte-sized addressing."""
        stdout = StringIO()
        mon = Monitor(stdout = stdout)
        mpu = mon._mpu
        mpu.osx = False; mpu.ind = True; mpu.siz = False; mpu.oax = True

        # index < 512, index + unsigned offset rmw indirect byte no wrap-around
        mpu.pc = 0x200
        mpu.a[0] = 0x7F
        zp = 0x7F
        mpu.memory[mpu.pc] = zp
        # Zero-page pointer 0xFE/0xFF -> effective address 0x0201.
        mpu.memory[0xFE] = 0x01
        mpu.memory[0xFF] = 0x02
        self.mask = mpu.byteMask
        self.rmwVal = 0x55
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x201] = self.rmwVal
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        data = mpu.byteMask & mpu.memory[0x201]
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index < 512, index + unsigned offset rmw indirect byte w/ wrap-around
        mpu.oax = False
        mpu.pc = 0x200
        mpu.x[0] = 0x7F
        zp = 0x80
        mpu.memory[mpu.pc] = zp
        # Pointer bytes wrap around the zero-page boundary (0xFF -> 0x00).
        mpu.memory[0xFF] = 0x01
        mpu.memory[0x00] = 0x02
        self.rmwVal = 0xAA
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x201] = self.rmwVal
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        data = mpu.byteMask & mpu.memory[0x201]
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index > 511, index + signed offset rmw indirect byte, no wrap-around
        mpu.pc = 0x200
        mpu.x[0] = 0x281
        zp = 0x80
        mpu.memory[mpu.pc] = zp
        mpu.memory[0x201] = 0x03
        mpu.memory[0x202] = 0x02
        self.rmwVal = 0x66
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x203] = self.rmwVal
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        data = mpu.byteMask & mpu.memory[0x203]
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

    # rmw_zpX (flags: {osx, ind, siz} = {0, 1, 1})
    def test_rmw_zpX_indirect_word(self):
        """Exercise rmw_zpX with indirect, word-sized addressing."""
        stdout = StringIO()
        mon = Monitor(stdout = stdout)
        mpu = mon._mpu
        mpu.osx = False; mpu.ind = True; mpu.siz = True; mpu.oax = True

        # index < 512, index + unsigned offset rmw indirect word no wrap-around
        mpu.pc = 0x200
        mpu.a[0] = 0x7F
        zp = 0x7F
        mpu.memory[mpu.pc] = zp
        mpu.memory[0xFE] = 0x01
        mpu.memory[0xFF] = 0x02
        self.mask = mpu.wordMask
        self.rmwVal = 0x55AA
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x201] = mpu.byteMask & self.rmwVal
        mpu.memory[0x202] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        # NOTE(review): this rmw-named test invokes ro_zpX(self.op) rather
        # than rmw_zpX(self.rmw) as the sibling cases do — looks like a
        # copy-paste slip; confirm against setUp's definitions of op/rmw.
        mpu.ro_zpX(self.op)
        tmp1 = mpu.byteMask & mpu.memory[0x201]
        tmp2 = mpu.byteMask & mpu.memory[0x202]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index < 512, index + unsigned offset rmw indirect word w/ wrap-around
        mpu.pc = 0x200
        mpu.a[0] = 0x80
        zp = 0x7F
        mpu.memory[mpu.pc] = zp
        mpu.memory[0xFF] = 0x01
        mpu.memory[0x00] = 0x02
        self.mask = mpu.wordMask
        self.rmwVal = 0xAA55
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x201] = mpu.byteMask & self.rmwVal
        mpu.memory[0x202] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        # NOTE(review): ro_zpX(self.op) again — see note above.
        mpu.ro_zpX(self.op)
        tmp1 = mpu.byteMask & mpu.memory[0x201]
        tmp2 = mpu.byteMask & mpu.memory[0x202]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index > 511, index + signed offset rmw indirect word, no wrap-around
        mpu.pc = 0x200
        mpu.a[0] = 0x281
        zp = 0x80
        mpu.memory[mpu.pc] = zp
        mpu.memory[0x201] = 0x03
        mpu.memory[0x202] = 0x02
        self.mask = mpu.wordMask
        self.rmwVal = 0xAA55
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x203] = mpu.byteMask & self.rmwVal
        mpu.memory[0x204] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        # NOTE(review): ro_zpX(self.op) again — see note above.
        mpu.ro_zpX(self.op)
        tmp1 = mpu.byteMask & mpu.memory[0x203]
        tmp2 = mpu.byteMask & mpu.memory[0x204]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

    # rmw_zpX (flags: {osx, ind, siz} = {1, 0, 0})
    def test_rmw_zpX_stk_relative_byte(self):
        """Exercise rmw_zpX with stack-relative, byte-sized addressing."""
        stdout = StringIO()
        mon = Monitor(stdout = stdout)
        mpu = mon._mpu
        mpu.osx = True; mpu.ind = False; mpu.siz = False;

        # index < 512, index + unsigned offset rmw byte w/o wrap-around
        mpu.pc = 0x200
        mpu.sp[1] = 0x180
        zp = 0x7F
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.byteMask
        self.rmwVal = 0x55
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x1FF] = self.rmwVal
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        data = mpu.byteMask & mpu.memory[0x1FF]
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index < 512, index + unsigned offset rmw byte w/ wrap-around
        mpu.pc = 0x200
        mpu.sp[1] = 0x180
        zp = 0x80
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.byteMask
        self.rmwVal = 0xAA
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x100] = self.rmwVal
        pc = mpu.pc + 1
        # NOTE(review): ro_zpX(self.op) inside an rmw test — likely a
        # copy-paste slip for rmw_zpX(self.rmw); confirm intent.
        mpu.ro_zpX(self.op)
        data = mpu.byteMask & mpu.memory[0x100]
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index > 511, index + signed offset rmw byte, no wrap-around
        mpu.pc = 0x200
        mpu.sp[1] = 0x281
        zp = 0x80
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.byteMask
        self.rmwVal = 0x66
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x201] = self.rmwVal
        pc = mpu.pc + 1
        # NOTE(review): ro_zpX(self.op) again — see note above.
        mpu.ro_zpX(self.op)
        # NOTE(review): wordMask in a byte-sized subcase (siblings use
        # byteMask) — harmless if memory holds bytes, but inconsistent.
        data = mpu.wordMask & mpu.memory[0x201]
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

    # rmw_zpX (flags: {osx, ind, siz} = {1, 0, 1})
    def test_rmw_zpX_stk_relative_word(self):
        """Exercise rmw_zpX with stack-relative, word-sized addressing."""
        stdout = StringIO()
        mon = Monitor(stdout = stdout)
        mpu = mon._mpu
        mpu.osx = True; mpu.ind = False; mpu.siz = True;

        # index < 512, index + unsigned offset rmw byte w/o wrap-around
        mpu.pc = 0x200
        mpu.sp[1] = 0x17F
        zp = 0x7F
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.wordMask
        self.rmwVal = 0x55AA
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x1FE] = mpu.byteMask & self.rmwVal
        mpu.memory[0x1FF] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        tmp1 = mpu.byteMask & mpu.memory[0x1FE]
        tmp2 = mpu.byteMask & mpu.memory[0x1FF]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index < 512, index + unsigned offset rmw byte w/ wrap-around
        mpu.pc = 0x200
        mpu.sp[1] = 0x180
        zp = 0x7F
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.wordMask
        self.rmwVal = 0xAA55
        self.rtnVal = self.mask & (-self.rmwVal)
        # Operand wraps from 0x1FF back to 0x100 within the stack page.
        mpu.memory[0x1FF] = mpu.byteMask & self.rmwVal
        mpu.memory[0x100] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        tmp1 = mpu.byteMask & mpu.memory[0x1FF]
        tmp2 = mpu.byteMask & mpu.memory[0x100]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

        # index > 511, index + signed offset rmw byte, no wrap-around
        mpu.pc = 0x200
        mpu.sp[1] = 0x281
        zp = 0x80
        mpu.memory[mpu.pc] = zp
        self.mask = mpu.wordMask
        self.rmwVal = 0x6633
        self.rtnVal = self.mask & (-self.rmwVal)
        mpu.memory[0x201] = mpu.byteMask & self.rmwVal
        mpu.memory[0x202] = mpu.byteMask & (self.rmwVal >> 8)
        pc = mpu.pc + 1
        mpu.rmw_zpX(self.rmw)
        tmp1 = mpu.byteMask & mpu.memory[0x201]
        tmp2 = mpu.byteMask & mpu.memory[0x202]
        data = mpu.wordMask & ((tmp2 << 8) + tmp1)
        self.assertEqual(self.rtnVal, data)
        self.assertEqual(pc, mpu.pc)

    # rmw_zpX (flags: {osx, ind, siz} = {1, 1, 0})
    def test_rmw_zpX_stk_relative_indirect_byte(self):
        # NOTE(review): this method is truncated at the end of this chunk;
        # its body continues outside this view.
        stdout = StringIO()
        mon = Monitor(stdout = stdout)
        mpu = mon._mpu
        mpu.osx = True; mpu.ind = True; mpu.siz = False;

        # index < 512, index + unsigned offset rmw indirect byte no wrap-around
objects for catalog cross-matching; in arcseconds import_ZTF : Import ZTF data (True) or read existing file (False) import_OSC : Import OSC data (True) or read existing file (False) import_local : Import local data from ./photometry directory import_lightcurve : Regenerate existing lightcurve file (True) or read the existing out from ./lightcurves (False) reimport_catalog : Overwrite the existing 3PI/SDSS catalog search_radius : Search radius in arcminutes for the 3PI/SDSS catalog dust_map : 'SF' or 'SFD', to query Schlafy and Finkbeiner 2011 or Schlafy, Finkbeiner and Davis 1998. set to 'none' to not correct for extinction Pcc_filter : The effective magnitude, radius, and Pcc are calculated in this filter. Pcc_filter_alternative : If Pcc_filter is not found, use this one as an acceptable alternative. star_separation : A star needs to be this close to be matched to a transient [in Arcsec] star_cut : maximum allowed probability of an object to be a star date_range : Maximum number of light curve days from the first detection to use in fitting the light curve n_walkers : Number of walkers for MCMC n_steps : Number of steps for MCMC n_cores : Number of cores for MCMC model : 'single' or 'double' for the function to fit to the lightcurve. Use later one for full light curves. training_days : Which training set to use for classification hostless_cut : Only consider hosts with a Pcc lower than this sorting_state : Seed number for list sorter clean : 0 keeps all objects, and 1 removed objects that do not have a detected host SMOTE_state : Seed number for SMOTE clf_state : Seed number for classifier n_estimators : Number of trees for random forest max_depth : Depth of trees for random forest feature_set : Set of features to use neighbors : neighbors to use for star/galaxy separator recalculate_nature : Overwrite existing Nature column? classifier : Pick the classifier to use based on the available information either 'quick', 'redshift', 'host', 'late', or 'all'. 
If empty default (or specified) values will be used. n_samples : Number of random seeds to use, only for the 'all' classifier object_class : Transient type, to overwrite any existing classes plot_lightcurve : Save an output plot with the light curve and PS1 image? do_observability : Calculate Observavility from Magellan and MMT? save_features : Save the features table to a file overwrite_features : Overwrite features? Returns --------------- Predicted Probability to be ['Nuclear','SLSN-I','SLSN-II','SNII','SNIIb','SNIIn','SNIa','SNIbc','Star'] ''' print('\n################# FLEET #################') # Empty Features for if search failed if save_features: filename = '%s_%s/center_table_%s_%s_%s.txt'%(int(float(date_range)), model, int(float(date_range)), model, object_name_in) features = np.array( [object_name_in, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]) features_table = table.Table(features, names = ['object_name' , 'red_amplitude', 'red_amplitude2', 'red_offset', 'red_magnitude', 'green_amplitude', 'green_amplitude2', 'green_offset', 'green_magnitude', 'delta_time', 'input_separation', 'input_size', 'normal_separation', 'deltamag_red', 'deltamag_green', 'model_color', 'Pcc', 'redshift', 'absmag'], dtype = ['S25' , 'float64' , 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64']) # If the features file already exists, don't overwrite it if save_features: if not overwrite_features : if len(glob.glob(filename)) > 0: print('exists') return table.Table() ##### Basic transient info ##### ra_deg, dec_deg, transient_source, object_name, ztf_data, ztf_name, tns_name, object_class, osc_data = get_transient_info(object_name_in, ra_in, dec_in, object_class, acceptance_radius, import_ZTF, import_OSC, import_lightcurve) if ra_deg == '--': 
return table.Table() print('%s %s %s'%(object_name, ra_deg, dec_deg)) if dec_deg <= -32: print('dec = %s, too low for SDSS or 3PI'%dec_deg) if save_features : features_table.write(filename, format = 'ascii', overwrite = True) return table.Table() ##### Lightcurve data ##### output_table = generate_lightcurve(ztf_data, osc_data, object_name, ztf_name, tns_name, import_lightcurve, import_local) # Ignore selected patches output_table = ignore_data(object_name, output_table) if len(output_table) == 0: print('No data in lightcurve') if save_features : features_table.write(filename, format = 'ascii', overwrite = True) return table.Table() if np.sum((output_table['UL'] == 'False') & (output_table['Ignore'] == 'False')) == 0: print('No useable data in lightcurve') if save_features : features_table.write(filename, format = 'ascii', overwrite = True) return table.Table() # Extinction g_correct, r_correct = get_extinction(ra_deg, dec_deg, dust_map) ##### Fit Lightcurve ##### red_amplitude, red_amplitude2, red_offset, red_magnitude, green_amplitude, green_amplitude2, green_offset, green_magnitude, model_color, bright_mjd, first_mjd, green_brightest, red_brightest = fit_linex(output_table, date_range, n_walkers, n_steps, n_cores, model, g_correct, r_correct) if np.isnan(red_amplitude): if plot_lightcurve: first_mjd = np.nanmin(np.array(output_table['MJD']).astype(float)) bright_mjd = output_table['MJD'][np.nanargmin(output_table['Mag'])] quick_plot(object_name, ra_deg, dec_deg, output_table, first_mjd, bright_mjd, full_range = True) # If running normal FLEET, stop here if save_features == False: return table.Table() ##### Catalog data ##### data_catalog_out = get_catalog(object_name, ra_deg, dec_deg, search_radius, dust_map, reimport_catalog) if len(data_catalog_out) == 0: print('No data found in SDSS or 3PI') # If running normal FLEET, stop here if save_features == False: return table.Table() ##### Catalog Operations ##### data_catalog = catalog_operations(object_name, 
data_catalog_out, ra_deg, dec_deg, Pcc_filter, Pcc_filter_alternative, neighbors, recalculate_nature) ##### Find the Best host ##### host_radius, host_separation, host_ra, host_dec, host_Pcc, host_magnitude, host_nature, photoz, photoz_err, specz, specz_err, best_host = get_best_host(data_catalog, star_separation, star_cut) ##### Use Appropriate Redshift ##### if np.isfinite(float(redshift)): # User specified redshift use_redshift = float(redshift) redshift_label = 'specz' elif np.isfinite(float(specz)): # Spectroscopic Redshift use_redshift = float(specz) redshift_label = 'specz' elif np.isfinite(float(photoz)): # Photometric Redshift use_redshift = float(photoz) redshift_label = 'photoz' else: # No Redshift use_redshift = np.nan redshift_label = 'none' ##### Get Features ##### features_table = create_features(object_name, red_amplitude, red_amplitude2, red_offset, red_magnitude, green_amplitude, green_amplitude2, green_offset, green_magnitude, model_color, bright_mjd, first_mjd, green_brightest, red_brightest, host_radius, host_separation, host_Pcc, host_magnitude, hostless_cut, use_redshift) ##### Save Features for training ##### if save_features: # Save output foldername = '%s_%s'%(int(float(date_range)), model) if len(glob.glob(foldername)) == 0: os.system('mkdir %s'%foldername) features_table.write(filename, format = 'ascii', overwrite = True) if plot_lightcurve: quick_plot(object_name, ra_deg, dec_deg, output_table, first_mjd, bright_mjd, g_correct, r_correct, red_amplitude, red_amplitude2, red_offset, red_magnitude, green_amplitude, green_amplitude2, green_offset, green_magnitude, full_range = False) return features_table # Empty variables (9 is the number of classes) quick_probability_average = np.nan * np.ones(9) quick_probability_std = np.nan * np.ones(9) late_probability_average = np.nan * np.ones(9) late_probability_std = np.nan * np.ones(9) redshift_probability_average = np.nan * np.ones(9) redshift_probability_std = np.nan * np.ones(9) 
host_probability_average = np.nan * np.ones(9) host_probability_std = np.nan * np.ones(9) ##### Run Classifier ##### if classifier == '': quick_probability_average = create_training_testing(object_name, features_table, training_days, model, clean, feature_set, sorting_state, SMOTE_state, clf_state, n_estimators, max_depth, hostless_cut)[0] elif classifier == 'quick': quick_probability_average = create_training_testing(object_name, features_table, training_days = 20, model = 'single', clean = 0, feature_set = 13, max_depth = 7)[0] elif classifier == 'late': late_probability_average = create_training_testing(object_name, features_table, training_days = 70, model = 'double', clean = 0, feature_set = 7 , max_depth = 9)[0] elif classifier == 'redshift': redshift_probability_average = create_training_testing(object_name, features_table, training_days = 20, model = 'single', clean = 0, feature_set = 16, max_depth = 7)[0] elif classifier == 'host': host_probability_average = create_training_testing(object_name, features_table, training_days = 70, model = 'double', clean = 1, feature_set = 7 , max_depth = 9)[0] ##### All in one classifier ##### elif classifier == 'all': # Quick Classifier for i in range(n_samples): quick_probability_n = create_training_testing(object_name, features_table, training_days = 20, model = 'single', clean = 0, feature_set = 13, max_depth = 7, clf_state = int(39 + i)) if i == 0: quick_probability = quick_probability_n else: quick_probability = np.vstack([quick_probability, quick_probability_n]) quick_probability_average = np.average(quick_probability, axis = 0) quick_probability_std = np.std (quick_probability, axis = 0) # Late Classifier for i in range(n_samples): late_probability_n = create_training_testing(object_name, features_table, training_days = 70, model = 'double', clean = 0, feature_set = 7, max_depth = 9, clf_state = int(39 + i)) if i == 0: late_probability = late_probability_n else: late_probability = np.vstack([late_probability, 
late_probability_n]) late_probability_average = np.average(late_probability, axis = 0) late_probability_std = np.std (late_probability, axis = 0) # Redsfhit Classifier if np.isfinite(np.float(use_redshift)) & (host_Pcc <= hostless_cut): for i in range(n_samples): redshift_probability_n = create_training_testing(object_name, features_table, training_days = 20, model = 'single', clean = 0, feature_set = 16, max_depth = 7, clf_state = int(39 + i)) if i == 0: redshift_probability = redshift_probability_n else: redshift_probability = np.vstack([redshift_probability, redshift_probability_n]) redshift_probability_average = np.average(redshift_probability, axis = 0) redshift_probability_std = np.std (redshift_probability, axis = 0) else: redshift_probability_average = np.nan * np.ones(len(quick_probability_average)) redshift_probability_std = np.nan * np.ones(len(quick_probability_average)) # Host Classifier if (host_Pcc <= hostless_cut): for i in range(n_samples): host_probability_n = create_training_testing(object_name, features_table, training_days = 70, model = 'double',
# NOTE(review): this chunk begins mid-method — the lines below are the tail of
# a mask-building helper whose "def" line lies outside this view.
        masks = (inputs.data[:, :, self.field.vocab.stoi['<pad>']] != 1).float()
        if bp < 1.0:
            masks = self.change_bp_masks(masks, bp)
        return masks

    def find_captions_length(self, all_captions):
        """Return a per-item list of caption lengths (in whitespace tokens).

        If ``all_captions`` is a list of lists (multiple reference captions
        per item), each item's length is the integer average over references.
        """
        # find length of each caption
        all_captions_lengths = []
        # list of lists
        if type(all_captions[0]) == list:
            num_captions = len(all_captions[0])
            for i in range(num_captions):
                caption_length = 0
                for j in range(len(all_captions)):
                    caption_length += len(all_captions[j][i].split(' '))
                # Integer average over the reference captions.
                caption_length = int(caption_length / len(all_captions))
                all_captions_lengths.append(caption_length)
        else:
            for cap in all_captions:
                all_captions_lengths.append(len(cap.split(' ')))
        return all_captions_lengths

    def quick_prepare_mscoco(self, batch, all_captions=None, fast=True, inputs_dec='pool', trg_len_option=None, max_len=20, trg_len_dic=None, decoder_inputs=None, targets=None, decoder_masks=None, target_masks=None, source_masks=None, bp=1.00, gpu=True):
        """Prepare encoder/decoder inputs, masks and targets for an MSCOCO batch.

        batch is (image features, captions); features are assumed to be
        (batch, d_model, 49) ResNet grid features — TODO confirm upstream.
        """
        features_beforepool, captions = batch[0], batch[1]
        batch_size, d_model = features_beforepool.size(0), features_beforepool.size(1)
        # batch_size x 49 x 512
        features_beforepool = features_beforepool.view(batch_size, d_model, 49).transpose(1, 2)
        if gpu:
            encoding = self.encoding(Variable(features_beforepool, requires_grad=False).cuda(), source_masks)  # batch of resnet features
            source_masks = torch.FloatTensor(batch_size, 49).fill_(1).cuda()
            targets = self.prepare_target_captions(captions, self.field.vocab.stoi).cuda()
        else:
            encoding = self.encoding(Variable(features_beforepool, requires_grad=False), source_masks)  # batch of resnet features
            source_masks = torch.FloatTensor(batch_size, 49).fill_(1)
            targets = self.prepare_target_captions(captions, self.field.vocab.stoi)

        # list of batch_size
        all_captions_lengths = self.find_captions_length(all_captions)

        # predicted decoder lens
        if trg_len_option == "predict":
            # batch_size tensor
            if gpu:
                target_len = Variable(torch.from_numpy(np.clip(np.array(all_captions_lengths), 0, self.max_offset)).cuda(), requires_grad=False)
            else:
                target_len = Variable(torch.from_numpy(np.clip(np.array(all_captions_lengths), 0, self.max_offset)), requires_grad=False)
            # HARDCODED (4 layer model) !!!
            pred_target_len_logits = self.pred_len((encoding[0]+encoding[1]+encoding[2]+encoding[3]+encoding[4]).mean(1))
            pred_target_len_loss = F.cross_entropy(pred_target_len_logits, target_len.long())
            pred_target_len = pred_target_len_logits.max(-1)[1]

        if fast == False:
            decoder_inputs, decoder_masks = self.prepare_decoder_inputs(targets, decoder_inputs, decoder_masks)  # prepare decoder-inputs
        else:
            # Fast (non-autoregressive) path: build decoder masks from the
            # chosen target-length strategy.
            if trg_len_option == "fixed":
                decoder_len = int(max_len)
                decoder_masks = torch.ones(batch_size, decoder_len)
                if gpu:
                    decoder_masks = decoder_masks.cuda(encoding[0].get_device())
                # TODO ADD BP OPTION
            elif trg_len_option == "reference" or (trg_len_option == "predict" and self.use_predicted_trg_len == False):
                decoder_len = max(all_captions_lengths)
                decoder_masks = np.zeros((batch_size, decoder_len))
                for idx in range(decoder_masks.shape[0]):
                    decoder_masks[idx][:all_captions_lengths[idx]] = 1
                decoder_masks = torch.from_numpy(decoder_masks).float()
                if gpu:
                    decoder_masks = decoder_masks.cuda(encoding[0].get_device())
            if trg_len_option == "predict":
                if self.use_predicted_trg_len:
                    pred_target_len = pred_target_len.data.cpu().numpy()
                    decoder_len = np.max(pred_target_len)
                    decoder_masks = np.zeros((batch_size, decoder_len))
                    for idx in range(pred_target_len.shape[0]):
                        decoder_masks[idx][:pred_target_len[idx]] = 1
                    decoder_masks = torch.from_numpy(decoder_masks).float()
                    if gpu:
                        decoder_masks = decoder_masks.cuda(encoding[0].get_device())
                    if bp < 1.0:
                        decoder_masks = self.change_bp_masks(decoder_masks, bp)
                if not self.use_predicted_trg_len:
                    pred_target_len = pred_target_len.data.cpu().numpy()
                target_len = target_len.data.cpu().numpy()
                # calculate error for predicted target length
                pred_target_len_correct = np.sum(pred_target_len == target_len)*100/batch_size
                pred_target_len_approx = np.sum(np.abs(pred_target_len - target_len) < 5)*100/batch_size
                average_target_len_correct = 0
                average_target_len_approx = 0
                rest = [pred_target_len_loss, pred_target_len_correct, pred_target_len_approx, average_target_len_correct, average_target_len_approx]

            # Build the actual decoder input tensor from the image features.
            if inputs_dec == 'pool':
                # batch_size x 1 x 512
                decoder_inputs = torch.mean(features_beforepool, 1, keepdim=True)
                decoder_inputs = decoder_inputs.repeat(1, int(decoder_len), 1)
                decoder_inputs = Variable(decoder_inputs, requires_grad=False)
                if gpu:
                    decoder_inputs = decoder_inputs.cuda(encoding[0].get_device())
            elif inputs_dec == 'zeros':
                decoder_inputs = Variable(torch.zeros(batch_size, int(decoder_len), d_model), requires_grad=False)
                if gpu:
                    decoder_inputs = decoder_inputs.cuda(encoding[0].get_device())

        # REMOVE THE FIRST <INIT> TAG FROM CAPTIONS
        targets = targets[:, 1:]
        # Target mask: positions whose token id != 1 (pad) are real tokens.
        if gpu:
            target_masks = (targets != 1).float().cuda().data
        else:
            target_masks = (targets != 1).float().data
        if trg_len_option != "predict":
            rest = []
        sources = None
        return decoder_inputs, decoder_masks, targets, target_masks, sources, source_masks, encoding, decoder_inputs.size(0), rest

    def prepare_target_captions(self, captions, vocab):
        """Convert raw caption strings into a padded LongTensor of token ids.

        Unknown words map to id 0; padding uses id 1 (from np.ones).
        Returns a Variable of shape (batch_size, seq_len).
        """
        # captions : batch_size X seq_len
        lst = []
        batch_size = len(captions)
        for bidx in range(batch_size):
            lst.append( ["<init>"] + captions[ bidx ].lower().split() + ["<eos>"] )
            #lst.append( [ vocab[idx] for idx in captions[ random.randint(0,4) ][ bidx ].lower().split() ] )
        lst = [[vocab[idx] if idx in vocab else 0 for idx in sentence] for sentence in lst]
        seq_len = max( [len(xx) for xx in lst] )
        # Pad with 1s up to the longest caption in the batch.
        captions = np.ones((batch_size, seq_len))
        for bidx in range(batch_size):
            min_len = min(seq_len, len(lst[bidx]))
            captions[bidx, :min_len] = np.array(lst[bidx][:min_len])
        captions = torch.from_numpy(captions).long()
        return Variable(captions, requires_grad=False)

    def quick_prepare(self, batch, fast=True, trg_len_option=None, trg_len_ratio=2.0, trg_len_dic=None, decoder_inputs=None, targets=None, decoder_masks=None, target_masks=None, source_masks=None, bp=1.00):
        """Prepare inputs, masks and targets for a text (translation) batch.

        Mirrors quick_prepare_mscoco but takes sources from ``batch`` via
        prepare_sources/prepare_targets.
        """
        sources, source_masks = self.prepare_sources(batch, source_masks)
        encoding = self.encoding(sources, source_masks)
        targets, target_masks = self.prepare_targets(batch, targets, decoder_masks)  # prepare decoder-targets

        # predicted decoder masks
        if trg_len_option == "predict":
            # Offset between target and source lengths, clamped to the model's
            # supported range.
            target_offset = Variable((target_masks.sum(-1) - source_masks.sum(-1)).clamp_(-self.max_offset, self.max_offset), requires_grad=False)
            # batch_size tensor
            source_len = Variable(source_masks.sum(-1), requires_grad=False)
            # NOTE(review): hard-codes six encoder layers here (cf. the
            # "4 layer" hardcode in quick_prepare_mscoco) — confirm.
            pred_target_offset_logits = self.pred_len((encoding[0]+encoding[1]+encoding[2]+encoding[3]+encoding[4]+encoding[5]).mean(1))
            pred_target_offset_logits = self.pred_len_drop( pred_target_offset_logits )
            pred_target_len_loss = F.cross_entropy(pred_target_offset_logits, (target_offset + self.max_offset).long())
            pred_target_offset = pred_target_offset_logits.max(-1)[1] - self.max_offset
            pred_target_len = source_len.long() + pred_target_offset

        d_model = encoding[0].size(-1)
        batch_size, src_max_len = source_masks.size()
        rest = []
        if fast:
            # compute decoder_masks
            if trg_len_option == "reference":
                _, decoder_masks = self.prepare_decoder_inputs(batch.trg, decoder_inputs, decoder_masks, bp=bp)
            elif trg_len_option == "noisy_ref":
                # Randomize the brevity factor between bp and 1.0.
                bp = np.random.uniform(bp, 1.0)
                _, decoder_masks = self.prepare_decoder_inputs(batch.trg, decoder_inputs, decoder_masks, bp=bp)
            elif trg_len_option == "average":
                decoder_masks = make_decoder_masks(source_masks, trg_len_dic)  # we use the average target lengths
            elif trg_len_option == "predict":
                # convert to numpy arrays first
                source_len = source_masks.sum(-1).cpu().numpy()
                target_len = target_masks.sum(-1).cpu().numpy()
                pred_target_len = pred_target_len.data.cpu().numpy()
                if not self.use_predicted_trg_len:
                    _, decoder_masks = self.prepare_decoder_inputs(batch.trg, decoder_inputs, decoder_masks, bp=bp)
                else:
                    decoder_max_len = max(pred_target_len)
                    decoder_masks = np.zeros((batch_size, decoder_max_len))
                    for idx in range(pred_target_len.shape[0]):
                        decoder_masks[idx][:pred_target_len[idx]] = 1
                    decoder_masks = torch.from_numpy(decoder_masks).float()
                    if source_masks.is_cuda:
                        decoder_masks = decoder_masks.cuda(source_masks.get_device())
                    if bp < 1.0:
                        decoder_masks = self.change_bp_masks(decoder_masks, bp)
                # check the results of predicting target length
                pred_target_len_correct = np.sum(pred_target_len == target_len)*100/batch_size
                pred_target_len_approx = np.sum(np.abs(pred_target_len - target_len) < 5)*100/batch_size
                # results with average len
                average_target_len = [query_trg_len_dic(trg_len_dic, source) for source in source_len]
                average_target_len = np.array(average_target_len)
                average_target_len_correct = np.sum(average_target_len == target_len)*100/batch_size
                average_target_len_approx = np.sum(np.abs(average_target_len - target_len) < 5)*100/batch_size
                rest = [pred_target_len_loss, pred_target_len_correct, pred_target_len_approx, average_target_len_correct, average_target_len_approx]
            elif "fixed" in trg_len_option:
                trg_len = (batch.trg != 1).sum(-1).int().data.cpu().numpy().tolist()
                source_lens = source_masks.sum(-1).cpu().numpy()
                decoder_masks = torch.zeros(batch_size, int(round(trg_len_ratio * src_max_len)))
                dec_len = int(round(trg_len_ratio * src_max_len))
                # One mask row per item, scaled by trg_len_ratio of its source length.
                for bi in range(batch_size):
                    ss = source_lens[bi]
                    decoder_masks[bi,:int(round(trg_len_ratio*ss))] = 1
                if encoding[0].is_cuda:
                    decoder_masks = decoder_masks.cuda(encoding[0].get_device())
            decoder_inputs, decoder_masks = self.prepare_initial(encoding, sources, source_masks, decoder_masks)
        else:
            decoder_inputs, decoder_masks = self.prepare_decoder_inputs(batch.trg, decoder_inputs, decoder_masks)  # prepare decoder-inputs
        return decoder_inputs, decoder_masks, targets, target_masks, sources, source_masks, encoding, decoder_inputs.size(0), rest

    def forward(self, encoding, source_masks, decoder_inputs, decoder_masks, decoding=False, beam=1, alpha=0.6, return_probs=False, positions=None, feedback=None):
        """Run the decoder; optionally decode greedily or with beam search.

        Returns decoder states, generated output, and/or output logits
        depending on ``decoding`` and ``return_probs``.
        """
        if (return_probs and decoding) or (not decoding):
            out = self.decoder(decoder_inputs, encoding, source_masks, decoder_masks)

        if decoding:
            if beam == 1:
                # greedy decoding
                output = self.decoder.greedy(encoding, source_masks, decoder_masks, feedback=feedback)
            else:
                output = self.decoder.beam_search(encoding, source_masks, decoder_masks, beam, alpha)

            if return_probs:
                return output, out, self.decoder.out(out)
            # NOTE don't do softmax for validation
            #return output, out, softmax(self.decoder.out(out))
            return output

        if return_probs:
            return out, softmax(self.decoder.out(out))
        return out

    def cost(self, decoder_targets, decoder_masks, out=None):
        """Masked cross-entropy loss over non-padded target positions."""
        # get loss in a sequence-format to save computational time.
        decoder_targets, out = prepare_cost(decoder_targets, out, decoder_masks.byte())
        logits = self.decoder.out(out)
        loss = F.cross_entropy(logits, decoder_targets)
        return loss

    def batched_cost(self, decoder_targets, decoder_masks, probs, batched=False):
        """Negative log-likelihood computed in batch mode.

        Handles both index targets (batch x length) and distribution targets.
        """
        # get loss in a batch-mode
        if decoder_targets.ndimension() == 2:  # batch x length
            loss = -torch.log(probs + TINY).gather(2, decoder_targets[:, :, None])[:, :, 0]  # batch x length
        else:
            loss = -(torch.log(probs + TINY) * decoder_targets).sum(-1)
        return self.apply_mask_cost(loss, decoder_masks, batched)


class FastTransformer(Transformer):
    """Non-autoregressive variant of Transformer with repeated decoders."""

    def __init__(self, src=None, trg=None, args=None):
        # NOTE(review): this __init__ is truncated at the end of this chunk.
        # Deliberately skips Transformer.__init__ (calls nn.Module's) so the
        # components below are built from scratch — confirm intent.
        super(Transformer, self).__init__()
        self.is_mscoco = args.dataset == "mscoco"
        self.decoder_input_how = args.decoder_input_how
        self.encoder = Encoder(src, args)
        '''
        if self.is_mscoco == False:
            self.encoder = Encoder(src, args)
        else:
            self.encoder = EncoderCNN(args)
        '''
        # One decoder per refinement pass.
        self.decoder = nn.ModuleList()
        for ni in range(args.num_decs):
            self.decoder.append(Decoder(trg, args, causal=False, positional=args.positional, diag=args.diag, out=self.encoder.out if args.share_embed_enc_dec1 and ni == 0 else None) )
        self.field = trg
        if self.is_mscoco == False:
            self.share_embed = args.share_embed
        else:
            self.share_embed = False
        self.train_repeat_dec = args.train_repeat_dec
        self.num_decs = args.num_decs
        if args.trg_len_option
== "predict": if args.dataset != "mscoco": self.pred_len = Linear(args.d_model, 2*args.max_offset + 1) else: self.pred_len = Linear(args.d_model, args.max_offset+1) self.pred_len_drop = nn.Dropout(args.drop_len_pred) self.max_offset = args.max_offset self.use_predicted_trg_len = args.use_predicted_trg_len self.n_layers = args.n_layers self.d_model = args.d_model self.softmax = nn.Softmax(dim = -1) def output_decoding(self, outputs, unbpe = True): field, text = outputs if field is 'src': return self.encoder.field.reverse(text.data, unbpe) else: return self.decoder[0].field.reverse(text.data, unbpe) # decoder_masks already decided # computes decoder_inputs def prepare_initial(self, encoding, source=None, source_masks=None, decoder_masks=None, N=1, tau=1): decoder_input_how = self.decoder_input_how d_model = encoding[0].size()[-1] attention = linear_attention(source_masks, decoder_masks, decoder_input_how) if decoder_input_how in ["copy", "pad", "wrap"]: attention = self.apply_mask(attention, decoder_masks, p=1) # p doesn't matter cos masked out attention = attention[:,:,None].expand(*attention.size(), d_model) decoder_inputs = torch.gather(encoding[0], dim=1, index=attention) elif decoder_input_how == "interpolate": decoder_inputs = matmul(attention, encoding[0]) # batch x max_trg x size return decoder_inputs, decoder_masks def forward(self, encoding, source_masks, decoder_inputs, decoder_masks, decoding=False, beam=1, alpha=0.6, return_probs=False, positions=None, feedback=None, iter_=0, T=1): thedecoder = self.decoder[iter_] out = thedecoder(decoder_inputs, encoding, source_masks, decoder_masks, input_embeddings=True, positions=positions, feedback=feedback) # out : output from the (-1)-th DecoderLayer if not decoding: # NOTE training if not return_probs: return out return out, softmax(thedecoder.out(out), T=T) # probs logits = thedecoder.out(out) if beam == 1: output = self.apply_mask(logits.max(-1)[1], decoder_masks) # NOTE given mask, set non-mask to 1 else: 
output, decoder_masks = topK_search(logits, decoder_masks, N=beam) output = self.apply_mask(output, decoder_masks) if not return_probs: return output
7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptcontr3pain = Column(Numeric, nullable=True, comments=None) # Rep 3: (double entry) # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptcontr3pain_d = Column(Numeric, nullable=True, comments=None) # Rep 4 # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptcontr4pain = Column(Numeric, nullable=True, comments=None) # Rep 4: (double entry) # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptcontr4pain_d = Column(Numeric, nullable=True, comments=None) # Rep 5 # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptcontr5pain = Column(Numeric, nullable=True, comments=None) # Rep 5: (double entry) # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptcontr5pain_d = Column(Numeric, nullable=True, comments=None) # Rep 1 # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 
3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr1pain = Column(Numeric, nullable=True, comments=None) # Rep 1: (double entry) # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr1pain_d = Column(Numeric, nullable=True, comments=None) # Rep 2 # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr2pain = Column(Numeric, nullable=True, comments=None) # Rep 2: (double entry) # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr2pain_d = Column(Numeric, nullable=True, comments=None) # Rep 3 # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr3pain = Column(Numeric, nullable=True, comments=None) # Rep 3: (double entry) # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr3pain_d = Column(Numeric, nullable=True, comments=None) # Rep 4 # Field Type: dropdown # Choices: 0.0, 
0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr4pain = Column(Numeric, nullable=True, comments=None) # Rep 4: (double entry) # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr4pain_d = Column(Numeric, nullable=True, comments=None) # Rep 5 # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr5pain = Column(Numeric, nullable=True, comments=None) # Rep 5: (double entry) # Field Type: dropdown # Choices: 0.0, 0.0 | 0.5, 0.5 | 1.0, 1.0 | 1.5, 1.5 | 2.0, 2.0 | 2.5, 2.5 | 3.0, 3.0 | 3.5, 3.5 | 4.0, 4.0 | 4.5, 4.5 | 5.0, 5.0 | 5.5, 5.5 | 6.0, 6.0 | 6.5, 6.5 | 7.0, 7.0 | 7.5, 7.5 | 8.0, 8.0 | 8.5, 8.5 | 9.0, 9.0 | 9.5, 9.5 | 10.0, 10.0 dmaptindxr5pain_d = Column(Numeric, nullable=True, comments=None) # Patient-Specific site assessed? # Field Type: radio # Choices: 1, 1. No, no new pain site or < 4 cm from standardized site | 2, 2. Yes, tested pt-specific site dmaptspecsite = Column(Integer, nullable=True, comments=None) # Sensation comparison patient-specific site # Field Type: radio # Choices: 1, 1. Equal on both sides | 2, 2. Stronger on surgery side | 3, 3. Stronger on contralateral side dmaptsenscompare = Column(Integer, nullable=True, comments=None) # DMA Test(s) completed # Field Type: radio # Choices: 1, Yes, all 4 sites | 2, Yes, but only some sites | 0, None dmatestcompyn = Column(Integer,
""" Contains LotlanTreeVisitor """ # standard libraries from enum import Enum import re # local sources from lotlan_scheduler.model.transport.transport import Transport from lotlan_scheduler.model.transport.template import Template from lotlan_scheduler.model.transport.instance import Instance from lotlan_scheduler.model.transport.task import Task from lotlan_scheduler.api.transportorder_step import TransportOrderStep from lotlan_scheduler.api.transportorder import TransportOrder from lotlan_scheduler.api.location import Location from lotlan_scheduler.api.event import Event # globals defines from lotlan_scheduler.defines import (TRIGGERED_BY_KEY, FINISHED_BY_KEY, REPEAT_KEY, ON_DONE_KEY, TRANSPORT_ORDER_KEY, LOCATION_KEY) from lotlan_scheduler.parser.LoTLanParserVisitor import LoTLanParserVisitor class OptionaStatement(Enum): ''' Enum to set the type of the optional statement when returned ''' TRIGGERED_BY = 1 FINISHED_BY = 2 ON_DONE = 3 class LotlanTreeVisitor(LoTLanParserVisitor): ''' Uses visitor pattern from antlr to traverse the parse tree and store program information in a Transport object ''' def __init__(self, error_listener): super() self.context_object = {} self.error_listener = error_listener # Visit a parse tree produced by TaskParser#program. 
    def visitProgram(self, ctx):
        """Visit the program root: collect all top-level definitions into a
        Transport object, rejecting duplicate names, then resolve TOS
        references and event instances.
        """
        # Create Program
        self.cp = Transport()
        self.cp.context_object = self.context_object
        if ctx.children:
            for child in ctx.children:
                program_component = self.visit(child)
                # Get object Template|Instance|Task|TOS
                # append appropiatly into the corresponding list
                if isinstance(program_component, Template):
                    if program_component.name not in self.cp.templates:
                        self.cp.templates[program_component.name] = (program_component)
                    else:
                        msg = "There is already an template with the same name defined"
                        self.error_listener.print_error(msg, child.start.line, child.start.column, 1)
                if isinstance(program_component, Instance):
                    if program_component.name not in self.cp.instances:
                        self.cp.instances[program_component.name] = (program_component)
                    else:
                        msg = "There is already an instance with the same name defined"
                        self.error_listener.print_error(msg, child.start.line, child.start.column, 1)
                if isinstance(program_component, Task):
                    if program_component.name not in self.cp.tasks:
                        self.cp.tasks[program_component.name] = (program_component)
                    else:
                        msg = "There is already a task with the same name defined"
                        self.error_listener.print_error(msg, child.start.line, child.start.column, 1)
                if isinstance(program_component, TransportOrderStep):
                    if program_component.name not in self.cp.transport_order_steps:
                        self.cp.transport_order_steps[program_component.name] = (program_component)
                    else:
                        msg = "There is already a transport order step with the same name defined"
                        self.error_listener.print_error(msg, child.start.line, child.start.column, 1)

        # Add tos for transport order to task
        # (replace the placeholder TOS objects by the fully parsed ones;
        # unknown names are silently skipped here -- presumably reported
        # by a later semantic check; TODO confirm)
        for task in self.cp.tasks.values():
            to = task.transport_order
            if to is not None:
                try:
                    to.pickup_tos = self.cp.transport_order_steps[to.pickup_tos.name]
                    to.delivery_tos = self.cp.transport_order_steps[to.delivery_tos.name]
                except KeyError:
                    pass

        # Resolve TriggeredBy/FinishedBy expressions into Event objects
        for tos in self.cp.transport_order_steps.values():
            event_instances = {}
            for instance in self.cp.instances.values():
                if instance.template_name == "Event":
                    event_instances[instance.name] = instance
            self.get_events_from_tos(tos.triggered_by_statements, event_instances, tos.triggered_by)
            self.get_events_from_tos(tos.finished_by_statements, event_instances, tos.finished_by)
        return self.cp

    def get_events_from_tos(self, expression, events, event_list, value=None, comparator=None):
        """Recursively walk a parsed condition `expression` (string or the
        dict produced by visitExpression) and append an Event object to
        `event_list` for every event instance referenced in it.
        """
        if type(expression) == str and expression != "" and expression in events:
            # bare event name: defaults to `== True`
            if value is None:
                value = True
            if comparator is None:
                comparator = "=="
            logical_name = expression
            physical_name = events[logical_name].keyval["name"]
            event_type = events[logical_name].keyval["type"]
            event_list.append(Event(logical_name, physical_name, event_type, comparator, value))
        elif type(expression) == dict:
            if len(expression) == 2:
                # unary operation: negation
                self.get_events_from_tos(expression["value"], events, event_list, value=False, comparator="!")
            elif len(expression) == 3:
                if expression["binOp"] == ".":
                    # attribute access: rebuild the dotted name
                    self.get_events_from_tos(str(expression["left"] + "." + str(expression["right"])), events, event_list)
                elif expression["left"] == "(" and expression["right"] == ")":
                    # parenthesized expression
                    self.get_events_from_tos(expression["binOp"], events, event_list)
                elif type(expression["right"]) == str:
                    # comparison against a literal value
                    self.get_events_from_tos(str(expression["left"]), events, event_list, value=expression["right"], comparator=expression["binOp"])
                else:
                    # binary operation: recurse into both sides
                    self.get_events_from_tos(expression["left"], events, event_list)
                    self.get_events_from_tos(expression["right"], events, event_list)

    # Visit a parse tree produced by TaskParser#template
    def visitTemplate(self, ctx):
        """Build a Template: name plus the list of member-variable names."""
        t = Template()
        t.name = self.visitTemplateStart(ctx.templateStart())
        t.context = ctx
        keyval = []
        for child in ctx.memberVariable():
            variable_content = self.visitMemberVariable(child)
            keyval.append(variable_content[0])
        t.keyval = keyval
        return t

    # Visit a parse tree produced by TaskParser#templateStart
    def visitTemplateStart(self, ctx):
        # token text is e.g. "Template Foo" -> the name is the second word
        return ctx.TEMPLATE().getText().split(" ")[1]

    # Visit a parse tree produced by TaskParser#instance
    def visitInstance(self, ctx):
        """Build an Instance: template name, instance name and the mapping of
        member variables to (quote-stripped) values.
        """
        instance = Instance()
        # Retrieve Template and Instance name
        names = self.visitInstanceStart(ctx.instanceStart())
        instance.template_name = names[0]
        instance.name = names[1]
        instance.context = ctx
        keyval = {}
        for child in ctx.memberVariable():
            variable_content = self.visitMemberVariable(child)
            variable = variable_content[0]
            value = variable_content[1]
            value = value.replace('"', "")
            keyval[variable] = value
            # NOTE(review): duplicate member variables only print to stdout
            # instead of going through self.error_listener -- confirm intended
            if variable in instance.context_dict:
                print("multiple definitions of variable " + variable)
            else:
                instance.context_dict[variable] = child
        instance.keyval = keyval
        return instance

    # Visit a parse tree produced by TaskParser#instanceStart
    def visitInstanceStart(self, ctx):
        # "TemplateName instanceName" -> (template_name, instance_name)
        template_name = ctx.INSTANCE().getText().split(" ")[0]
        instance_name = ctx.STARTS_WITH_LOWER_C_STR().getText()
        return (template_name, instance_name)

    # Visit a parse tree produced by TaskParser#memberVariable.
    def visitMemberVariable(self, ctx):
        """Return (variable_name, value) for one `name = value` assignment."""
        assignment_str = ctx.ASSIGNMENT().getText()
        p = re.compile(r'\w+')
        variable_name = p.search(assignment_str).group(0)
        return (variable_name, self.visitValue(ctx.value()))

    # Visit a parse tree produced by TaskParser#value.
    def visitValue(self, ctx):
        # a value token is exactly one of string / numeric / empty
        value = 0
        if ctx.STRING_VALUE():
            value = ctx.STRING_VALUE().getText()
        elif ctx.NUMERIC_VALUE():
            value = ctx.NUMERIC_VALUE().getText()
        else:
            value = ctx.EMPTY_VALUE().getText()
        return value

    # Visit a parse tree produced by TaskParser#transportOrderStep.
    def visitTransportOrderStep(self, ctx):
        """Build a TransportOrderStep and fill it from its statements."""
        tos = TransportOrderStep()
        tos.name = self.visitTosStart(ctx.tosStart())
        tos.context = ctx
        for child in ctx.tosStatement():
            self.visitTosStatement(child, tos)
        return tos

    # Visit a parse tree produced by TaskParser#tosStart.
    def visitTosStart(self, ctx):
        return ctx.STARTS_WITH_LOWER_C_STR().getText()

    # Visit a parse tree produced by TaskParser#tosStatements.
def visitTosStatement(self, ctx, tos): context_dict = tos.context_dict if ctx.optTosStatement(): values = self.visitOptTosStatement(ctx.optTosStatement()) context = ctx.optTosStatement() if values[1] == OptionaStatement.TRIGGERED_BY: if TRIGGERED_BY_KEY not in context_dict: tos.triggered_by_statements = values[0] context_dict[TRIGGERED_BY_KEY] = context else: msg = "TriggeredBy is definied multiple times" self.error_listener.print_error(msg, context.start.line, context.start.column, len("TriggeredBy")) elif values[1] == OptionaStatement.FINISHED_BY: if FINISHED_BY_KEY not in context_dict: tos.finished_by_statements = values[0] context_dict[FINISHED_BY_KEY] = context else: msg = "FinishedBy is definied multiple times" self.error_listener.print_error(msg, context.start.line, context.start.column, len("FinishedBy")) elif values[1] == OptionaStatement.ON_DONE: if ON_DONE_KEY not in context_dict: tos.on_done = values[0] context_dict[ON_DONE_KEY] = context else: msg = "OnDone is definied multiple times" self.error_listener.print_error(msg, context.start.line, context.start.column, len("OnDone")) elif ctx.locationStatement(): if LOCATION_KEY not in context_dict: tos.location = self.visitLocationStatement(ctx.locationStatement()) context_dict[LOCATION_KEY] = ctx.locationStatement() else: context = ctx.locationStatement() msg = "Location is definied multiple times" self.error_listener.print_error(msg, context.start.line, context.start.column, len("Location")) elif ctx.parameterStatement(): parameters = self.visitParameterStatement(ctx.parameterStatement()) tos.parameters = parameters def visitParameterStatement(self, ctx): parameters = [] for parameter in ctx.STARTS_WITH_LOWER_C_STR(): parameters.append(parameter.getText()) return parameters # Visit a parse tree produced by TaskParser#Location Statement. 
def visitLocationStatement(self, ctx): location = Location(ctx.STARTS_WITH_LOWER_C_STR().getText(), "", "") return location # Visit a parse tree produced by TaskParser#optTosStatement. def visitOptTosStatement(self, ctx): childs = ctx.children for i in range(len(ctx.children)): if childs[i] == ctx.eventStatement(): return self.visitEventStatement(ctx.eventStatement()) elif childs[i] == ctx.onDoneStatement(): return self.visitOnDoneStatement(ctx.onDoneStatement()) def visitEventStatement(self, ctx): if ctx.TRIGGERED_BY(): return (self.visitExpression(ctx.expression()), OptionaStatement.TRIGGERED_BY) elif ctx.FINISHED_BY(): return (self.visitExpression(ctx.expression()), OptionaStatement.FINISHED_BY) def visitOnDoneStatement(self, ctx): on_done = [] for task in ctx.STARTS_WITH_LOWER_C_STR(): on_done.append(task.getText()) return (on_done, OptionaStatement.ON_DONE) # Visit a parse tree produced by TaskParser. def visitTask(self, ctx): ti = Task() ti.name = self.visitTaskStart(ctx.taskStart()) ti.context = ctx for child in ctx.taskStatement(): self.visitTaskStatement(child, ti) return ti # Visit a parse tree produced by TaskParser#taskStart. def visitTaskStart(self, ctx): return ctx.STARTS_WITH_LOWER_C_STR().getText() # Visit a parse tree produced by TaskParser#taskStatement. def visitTaskStatement(self, ctx, task_info): context_dict = task_info.context_dict if(ctx.repeatStatement()): repeat_stmt = self.visitRepeatStatement(ctx.repeatStatement()) if REPEAT_KEY not in context_dict: context_dict[REPEAT_KEY] = ctx.repeatStatement() task_info.repeat = repeat_stmt else: context = ctx.repeatStatement() msg = "Repeat was defined multiple times!" 
self.error_listener.print_error(msg, context.start.line, context.start.column, len("Repeat")) elif ctx.optTosStatement(): values = self.visitOptTosStatement(ctx.optTosStatement()) context = ctx.optTosStatement() if values[1] == OptionaStatement.TRIGGERED_BY: if TRIGGERED_BY_KEY not in context_dict: task_info.triggered_by = values[0] task_info.context_dict[TRIGGERED_BY_KEY] = ctx.optTosStatement() else: msg = "TriggeredBy is definied multiple times" self.error_listener.print_error(msg, context.start.line, context.start.column, len("TriggeredBy")) elif values[1] == OptionaStatement.FINISHED_BY: if FINISHED_BY_KEY not in context_dict: task_info.finished_by = values[0] task_info.context_dict[FINISHED_BY_KEY] = ctx.optTosStatement() else: msg = "FinishedBy is definied multiple times" self.error_listener.print_error(msg, context.start.line, context.start.column, len("FinishedBy")) elif values[1] == OptionaStatement.ON_DONE: if ON_DONE_KEY not in context_dict: task_info.on_done = values[0] context_dict[ON_DONE_KEY] = ctx.optTosStatement() else: msg = "OnDone is definied multiple times" self.error_listener.print_error(msg, context.start.line, context.start.column, len("OnDone")) elif ctx.transportOrder(): if TRANSPORT_ORDER_KEY not in context_dict: task_info.transport_order = self.visitTransportOrder(ctx.transportOrder()) context_dict[TRANSPORT_ORDER_KEY] = ctx.optTosStatement() else: context = ctx.transportOrder() msg = "TransportOrder was defined multiple times" self.error_listener.print_error(msg, context.start.line, context.start.column, len("Transport")) # Visit a parse tree produced by TaskParser#transportOrder. 
    def visitTransportOrder(self, ctx):
        """Build a TransportOrder from its From/To sub-statements."""
        transport_order = TransportOrder()
        childs = ctx.children
        for i in range(len(childs)):
            if childs[i] == ctx.fromStatement():
                self.visitFromStatement(ctx.fromStatement(), transport_order)
            elif childs[i] == ctx.toStatement():
                self.visitToStatement(ctx.toStatement(), transport_order)
        return transport_order

    def visitFromStatement(self, ctx, transport_order):
        """Record the pickup TOS name and its call parameters."""
        transport_order.pickup_tos.name = ctx.STARTS_WITH_LOWER_C_STR().getText()
        parameters = ctx.parameters()
        if parameters is not None:
            # parameter list children include the "," separators -- skip them
            for parameter in parameters.children:
                if parameter.getText() != ",":
                    transport_order.from_parameters.append(parameter.getText())

    def visitToStatement(self, ctx, transport_order):
        """Record the delivery TOS name and its call parameters."""
        transport_order.delivery_tos.name = ctx.STARTS_WITH_LOWER_C_STR().getText()
        parameters = ctx.parameters()
        if parameters is not None:
            # parameter list children include the "," separators -- skip them
            for parameter in parameters.children:
                if parameter.getText() != ",":
                    transport_order.to_parameters.append(parameter.getText())

    def visitParameters(self, ctx):
        # NOTE(review): iterates the context object itself, not ctx.children
        # as the other methods do -- presumably equivalent for this rule, but
        # confirm against the generated parser.
        parameters = []
        if ctx:
            for parameter in ctx:
                parameters.append(parameter.getText())
        return parameters

    def visitRepeatStatement(self, ctx):
        # repeat count is returned as its raw token text, not an int
        return ctx.INTEGER().getText()

    # Visit a parse tree produced by TaskParser#expression.
    def visitExpression(self, ctx):
        """Convert an expression subtree into a nested dict/str structure
        consumed by get_events_from_tos (see branch comments below).
        """
        length = len(ctx.children)
        if length == 1:
            # Terminal, returns TERMINAL
            ele = self._getContent(ctx.children[0])
            return ele
        if length == 2:
            # UnOperation, returns {unop: !, value: EXPRESSION}
            unOp = self._getContent(ctx.children[0])
            ele = self._getContent(ctx.children[1])
            return dict(unop=unOp, value=ele)
        if length == 3:
            # binOperation:, returns {op: (==|!=|<=|..|), left: EXPRESSION, right:EXPRESSION}
            left = self._getContent(ctx.children[0])
            binOp = self._getContent(ctx.children[1])
            right = self._getContent(ctx.children[2])
            return dict(binOp=binOp, left=left, right=right)
        return None

    # NOTE: this method is truncated at the end of the visible fragment.
    def _getContent(self, child):
        ele = self.visit(child)
        # If None, then an instance.value is used!
        if
name_btn.style.button_color = 'lightgreen' self.float316 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='initial concentration external IL-1b', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float316, units_btn, description_btn] box340 = Box(children=row, layout=box_layout) name_btn = Button(description='cytoplasmic_IL_18', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float317 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='initial concentration cytoplasmic IL-18', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float317, units_btn, description_btn] box341 = Box(children=row, layout=box_layout) name_btn = Button(description='external_IL_18', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float318 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='a.u. 
of concentration', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='initial concentration external IL-18', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float318, units_btn, description_btn] box342 = Box(children=row, layout=box_layout) name_btn = Button(description='cytoplasmic_volume', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float319 = FloatText(value='2494', step='100', style=style, layout=widget_layout) units_btn = Button(description='a.u. of volume', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='cytoplasmic cell volume', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float319, units_btn, description_btn] box343 = Box(children=row, layout=box_layout) name_btn = Button(description='cell_pyroptosis_flag', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float320 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='bool for pyropotosis', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float320, units_btn, description_btn] box344 = Box(children=row, layout=box_layout) name_btn = Button(description='cell_bystander_pyroptosis_flag', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float321 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='bool for 
bystander pyropotosis', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float321, units_btn, description_btn] box345 = Box(children=row, layout=box_layout) name_btn = Button(description='cell_virus_induced_apoptosis_flag', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float322 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='bool for bystander pyropotosis', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float322, units_btn, description_btn] box346 = Box(children=row, layout=box_layout) name_btn = Button(description='internalised_pro_pyroptosis_cytokine', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float323 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='none', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='used internally to track pro-pyroptotic cytokine concentration', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float323, units_btn, description_btn] box347 = Box(children=row, layout=box_layout) name_btn = Button(description='interferon_secretion_rate_via_infection', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float324 = FloatText(value='0.05', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='1/min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='Type-1 interferon secretion rate for infected cells', disabled=True, 
layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float324, units_btn, description_btn] box348 = Box(children=row, layout=box_layout) name_btn = Button(description='max_interferon_secretion_rate_via_paracrine', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float325 = FloatText(value='0.5', step='0.1', style=style, layout=widget_layout) units_btn = Button(description='1/min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='Type-1 interferon secretion rate after activation by Type-1 interferon', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float325, units_btn, description_btn] box349 = Box(children=row, layout=box_layout) name_btn = Button(description='interferon_max_response_threshold', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float326 = FloatText(value='1', step='0.1', style=style, layout=widget_layout) units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='Interferon response scales linearly until Int-1 exceeds this threshold', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float326, units_btn, description_btn] box350 = Box(children=row, layout=box_layout) name_btn = Button(description='interferon_activation', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float327 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='Current interferon signaling activation state (between 0 and 1)', disabled=True, 
layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float327, units_btn, description_btn] box351 = Box(children=row, layout=box_layout) name_btn = Button(description='interferon_max_virus_inhibition', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float328 = FloatText(value='0.9', step='0.1', style=style, layout=widget_layout) units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='At max interferon activation, max inhibition of viral replication (between 0 and 1)', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float328, units_btn, description_btn] box352 = Box(children=row, layout=box_layout) name_btn = Button(description='interferon_viral_RNA_threshold', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float329 = FloatText(value='2', step='0.1', style=style, layout=widget_layout) units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='infected cell interferon secretion saturates at this viral RNA level', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float329, units_btn, description_btn] box353 = Box(children=row, layout=box_layout) name_btn = Button(description='TCell_contact_time', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float330 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='tracks total contact time with CD8 T cells', disabled=True, layout=desc_button_layout) 
description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float330, units_btn, description_btn] box354 = Box(children=row, layout=box_layout) name_btn = Button(description='cell_attachment_rate', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float331 = FloatText(value='0.2', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='1/min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='the rate at which the cell attaches to cells in contact', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float331, units_btn, description_btn] box355 = Box(children=row, layout=box_layout) name_btn = Button(description='cell_attachment_lifetime', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float332 = FloatText(value='8.5', step='0.1', style=style, layout=widget_layout) units_btn = Button(description='min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='the mean duration of a cell-cell attachment', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float332, units_btn, description_btn] box356 = Box(children=row, layout=box_layout) name_btn = Button(description='TCell_contact_death_threshold', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float333 = FloatText(value='50', step='1', style=style, layout=widget_layout) units_btn = Button(description='min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='threshold CD8 T cell contact time to trigger apoptosis', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float333, units_btn, description_btn] 
box357 = Box(children=row, layout=box_layout) name_btn = Button(description='max_attachment_distance', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float334 = FloatText(value='15', step='1', style=style, layout=widget_layout) units_btn = Button(description='micron', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float334, units_btn, description_btn] box358 = Box(children=row, layout=box_layout) name_btn = Button(description='elastic_attachment_coefficient', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float335 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout) units_btn = Button(description='1/min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='elastic coefficient for cell-cell attachment', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float335, units_btn, description_btn] box359 = Box(children=row, layout=box_layout) name_btn = Button(description='time_to_next_phagocytosis', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float336 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='time it takes for the apoptotic material to be phagocytosed', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float336, units_btn, description_btn] box360 = Box(children=row, layout=box_layout) name_btn = Button(description='material_internalisation_rate', disabled=True, 
layout=name_button_layout) name_btn.style.button_color = 'tan' self.float337 = FloatText(value='1', step='0.1', style=style, layout=widget_layout) units_btn = Button(description='micron/min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float337, units_btn, description_btn] box361 = Box(children=row, layout=box_layout) name_btn = Button(description='threshold_macrophage_volume', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float338 = FloatText(value='6500', step='100', style=style, layout=widget_layout) units_btn = Button(description='micron', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float338, units_btn, description_btn] box362 = Box(children=row, layout=box_layout) name_btn = Button(description='threshold_neutrophil_volume', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float339 = FloatText(value='1581', step='100', style=style, layout=widget_layout) units_btn = Button(description='micron', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float339, units_btn, description_btn] box363 = Box(children=row, layout=box_layout) name_btn = Button(description='exhausted_macrophage_death_rat', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float340 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout) units_btn = Button(description='1/min', disabled=True, layout=name_button_layout) 
units_btn.style.button_color = 'lightgreen' description_btn = Button(description='', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float340, units_btn, description_btn] box364 = Box(children=row, layout=box_layout) name_btn = Button(description='ability_to_phagocytose_infected_cell', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float341 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='Boolean for whether macrophages can phagocytose infected cells', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'tan' row = [name_btn, self.float341, units_btn, description_btn] box365 = Box(children=row, layout=box_layout) name_btn = Button(description='time_of_DC_departure', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'lightgreen' self.float342 = FloatText(value='0', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'lightgreen' description_btn = Button(description='Time DC leaves tissue after activation', disabled=True, layout=desc_button_layout) description_btn.style.button_color = 'lightgreen' row = [name_btn, self.float342, units_btn, description_btn] box366 = Box(children=row, layout=box_layout) name_btn = Button(description='phagocytosis_rate', disabled=True, layout=name_button_layout) name_btn.style.button_color = 'tan' self.float343 = FloatText(value='0.167', step='0.01', style=style, layout=widget_layout) units_btn = Button(description='1/min', disabled=True, layout=name_button_layout) units_btn.style.button_color = 'tan' description_btn = Button(description='', disabled=True, layout=desc_button_layout) 
description_btn.style.button_color = 'tan' row = [name_btn, self.float343, units_btn, description_btn] box367
str, *args: Any, **kwargs: Any ) -> Optional[JSONLike]: """Try to call a function on the ledger API.""" function = getattr(self._api.eth, callable_name) response = function(*args, **kwargs) if isinstance(response, AttributeDict): result = AttributeDictTranslator.to_dict(response) return result if type(response) in (int, float, bytes, str, list, dict): # pragma: nocover # missing full checks for nested objects return {f"{callable_name}_result": response} raise NotImplementedError( # pragma: nocover f"Response must be of types=int, float, bytes, str, list, dict. Found={type(response)}." ) def get_transfer_transaction( # pylint: disable=arguments-differ self, sender_address: Address, destination_address: Address, amount: int, tx_fee: int, tx_nonce: str, chain_id: Optional[int] = None, max_fee_per_gas: Optional[int] = None, max_priority_fee_per_gas: Optional[str] = None, gas_price: Optional[str] = None, gas_price_strategy: Optional[str] = None, gas_price_strategy_extra_config: Optional[Dict] = None, **kwargs: Any, ) -> Optional[JSONLike]: """ Submit a transfer transaction to the ledger. :param sender_address: the sender address of the payer. :param destination_address: the destination address of the payee. :param amount: the amount of wealth to be transferred (in Wei). :param tx_fee: the transaction fee (gas) to be used (in Wei). :param tx_nonce: verifies the authenticity of the tx. :param chain_id: the Chain ID of the Ethereum transaction. :param max_fee_per_gas: maximum amount you’re willing to pay, inclusive of `baseFeePerGas` and `maxPriorityFeePerGas`. The difference between `maxFeePerGas` and `baseFeePerGas + maxPriorityFeePerGas` is refunded (in Wei). :param max_priority_fee_per_gas: the part of the fee that goes to the miner (in Wei). :param gas_price: the gas price (in Wei) :param gas_price_strategy: the gas price strategy to be used. :param gas_price_strategy_extra_config: extra config for gas price strategy. 
:param kwargs: keyword arguments :return: the transfer transaction """ transaction: Optional[JSONLike] = None chain_id = chain_id if chain_id is not None else self._chain_id destination_address = self._api.toChecksumAddress(destination_address) sender_address = self._api.toChecksumAddress(sender_address) nonce = self._try_get_transaction_count(sender_address) if nonce is None: return transaction transaction = { "nonce": nonce, "chainId": chain_id, "to": destination_address, "value": amount, "gas": tx_fee, "data": tx_nonce, } if self._is_gas_estimation_enabled: transaction = self.update_with_gas_estimate(transaction) if max_fee_per_gas is not None: max_priority_fee_per_gas = ( self._try_get_max_priority_fee() if max_priority_fee_per_gas is None else max_priority_fee_per_gas ) transaction.update( { "maxFeePerGas": max_fee_per_gas, "maxPriorityFeePerGas": max_priority_fee_per_gas, } ) if gas_price is not None: transaction.update({"gasPrice": gas_price}) if gas_price is None and max_fee_per_gas is None: gas_pricing = self.try_get_gas_pricing( gas_price_strategy, gas_price_strategy_extra_config ) if gas_pricing is None: return transaction # pragma: nocover transaction.update(gas_pricing) return transaction def _get_gas_price_strategy( self, gas_price_strategy: Optional[str] = None, extra_config: Optional[Dict] = None, ) -> Optional[Tuple[str, Callable]]: """ Returns parameters for gas price callable. Note: The priority of gas price callable will be `extra_config(Runtime params) > self._gas_price_strategies (Set using config file.) > DEFAULT_GAS_PRICE_STRATEGIES (Default values.)` :param gas_price_strategy: name of the gas price strategy. :param extra_config: gas price strategy getter parameters. :return: gas price strategy's name and callable. 
""" gas_price_strategy = ( gas_price_strategy if gas_price_strategy is not None else self._default_gas_price_strategy ) if gas_price_strategy not in AVAILABLE_STRATEGIES: # pragma: nocover _default_logger.debug( f"Gas price strategy must be one of {AVAILABLE_STRATEGIES}, provided: {self._default_gas_price_strategy}" ) return None _default_logger.debug(f"Using strategy: {gas_price_strategy}") gas_price_strategy_getter = self._gas_price_strategy_callables.get( gas_price_strategy, None ) parameters = DEFAULT_GAS_PRICE_STRATEGIES.get(gas_price_strategy) parameters.update(self._gas_price_strategies.get(gas_price_strategy, {})) parameters.update(extra_config or {}) return gas_price_strategy, gas_price_strategy_getter(**parameters) @staticmethod def __reprice(old_price: Wei) -> Wei: return Wei(math.ceil(old_price * TIP_INCREASE)) @try_decorator("Unable to retrieve gas price: {}", logger_method="warning") def try_get_gas_pricing( self, gas_price_strategy: Optional[str] = None, extra_config: Optional[Dict] = None, old_price: Optional[Dict[str, Wei]] = None, ) -> Optional[Dict[str, Wei]]: """ Try get the gas price based on the provided strategy. :param gas_price_strategy: the gas price strategy to use, e.g., the EIP-1559 strategy. Can be either `eip1559` or `gas_station`. :param extra_config: gas price strategy getter parameters. :param old_price: the old gas price params in case that we are trying to resubmit a transaction. :return: a dictionary with the gas data. 
""" retrieved_strategy = self._get_gas_price_strategy( gas_price_strategy, extra_config, ) if retrieved_strategy is None: # pragma: nocover return None gas_price_strategy, gas_price_strategy_callable = retrieved_strategy prior_strategy = self._api.eth.gasPriceStrategy try: self._api.eth.set_gas_price_strategy(gas_price_strategy_callable) gas_price = self._api.eth.generate_gas_price() finally: self._api.eth.set_gas_price_strategy(prior_strategy) # pragma: nocover if gas_price is None or old_price is None: return gas_price gas_price = cast(Dict[str, Wei], gas_price) if gas_price_strategy == EIP1559: updated_max_fee_per_gas = self.__reprice(old_price["maxFeePerGas"]) updated_max_priority_fee_per_gas = self.__reprice( old_price["maxPriorityFeePerGas"] ) if gas_price["maxFeePerGas"] < updated_max_fee_per_gas: gas_price["maxFeePerGas"] = updated_max_fee_per_gas gas_price["maxPriorityFeePerGas"] = updated_max_priority_fee_per_gas elif gas_price_strategy == GAS_STATION: updated_gas_price = self.__reprice(old_price["gasPrice"]) gas_price["gasPrice"] = max(gas_price["gasPrice"], updated_gas_price) return gas_price @try_decorator("Unable to retrieve transaction count: {}", logger_method="warning") def _try_get_transaction_count(self, address: Address) -> Optional[int]: """Try get the transaction count.""" nonce = self._api.eth.get_transaction_count( # pylint: disable=no-member self._api.toChecksumAddress(address) ) return nonce def update_with_gas_estimate(self, transaction: JSONLike) -> JSONLike: """ Attempts to update the transaction with a gas estimate :param transaction: the transaction :return: the updated transaction """ gas_estimate = self._try_get_gas_estimate(transaction) if gas_estimate is not None: transaction["gas"] = gas_estimate return transaction @try_decorator("Unable to retrieve gas estimate: {}", logger_method="warning") def _try_get_gas_estimate(self, transaction: JSONLike) -> Optional[int]: """Try get the gas estimate.""" gas_estimate = 
self._api.eth.estimate_gas(  # pylint: disable=no-member
            transaction=cast(TxParams, AttributeDictTranslator.from_dict(transaction))
        )
        return gas_estimate

    def send_signed_transaction(
        self, tx_signed: JSONLike, raise_on_try: bool = False
    ) -> Optional[str]:
        """
        Send a signed transaction and wait for confirmation.

        :param tx_signed: the signed transaction
        :param raise_on_try: whether the method will raise or log on error
        :return: tx_digest, if present
        """
        tx_digest = self._try_send_signed_transaction(
            tx_signed, raise_on_try=raise_on_try
        )
        return tx_digest

    @try_decorator("Unable to send transaction: {}", logger_method="warning")
    def _try_send_signed_transaction(
        self, tx_signed: JSONLike, **_kwargs: Any
    ) -> Optional[str]:
        """
        Try send a signed transaction.

        :param tx_signed: the signed transaction
        :param _kwargs: the keyword arguments. Possible kwargs are:
            `raise_on_try`: bool flag specifying whether the method will raise or log on error (used by `try_decorator`)
        :return: tx_digest, if present
        """
        signed_transaction = SignedTransactionTranslator.from_dict(tx_signed)
        hex_value = self._api.eth.send_raw_transaction(  # pylint: disable=no-member
            signed_transaction.rawTransaction
        )
        tx_digest = hex_value.hex()
        _default_logger.debug(
            "Successfully sent transaction with digest: {}".format(tx_digest)
        )
        return tx_digest

    def get_transaction_receipt(self, tx_digest: str) -> Optional[JSONLike]:
        """
        Get the transaction receipt for a transaction digest.

        :param tx_digest: the digest associated to the transaction.
        :return: the tx receipt, if present
        """
        tx_receipt = self._try_get_transaction_receipt(tx_digest)
        # A falsy "status" means the transaction reverted; attach a
        # human-readable revert reason to the receipt.
        if tx_receipt is not None and not bool(tx_receipt["status"]):
            tx = self.get_transaction(tx_digest)
            tx_receipt["revert_reason"] = self._try_get_revert_reason(tx)
        return tx_receipt

    @try_decorator(
        "Error when attempting getting tx receipt: {}", logger_method="debug"
    )
    def _try_get_transaction_receipt(self, tx_digest: str) -> Optional[JSONLike]:
        """
        Try get the transaction receipt.

        :param tx_digest: the digest associated to the transaction.
        :return: the tx receipt, if present
        """
        tx_receipt = self._api.eth.get_transaction_receipt(  # pylint: disable=no-member
            cast(HexStr, tx_digest)
        )
        return AttributeDictTranslator.to_dict(tx_receipt)

    def get_transaction(self, tx_digest: str) -> Optional[JSONLike]:
        """
        Get the transaction for a transaction digest.

        :param tx_digest: the digest associated to the transaction.
        :return: the tx, if present
        """
        tx = self._try_get_transaction(tx_digest)
        return tx

    @try_decorator("Error when attempting getting tx: {}", logger_method="debug")
    def _try_get_transaction(self, tx_digest: str) -> Optional[JSONLike]:
        """
        Get the transaction.

        :param tx_digest: the transaction digest.
        :return: the tx, if found
        """
        tx = self._api.eth.get_transaction(
            cast(HexStr, tx_digest)
        )  # pylint: disable=no-member
        return AttributeDictTranslator.to_dict(tx)

    @try_decorator(
        "Error when attempting getting tx revert reason: {}", logger_method="debug"
    )
    def _try_get_revert_reason(self, tx: TxData) -> str:
        """Try to check the revert reason of a transaction.

        :param tx: the transaction for which we want to get the revert reason.
        :return: the revert reason message.
        """
        # build a new transaction to replay:
        replay_tx = {
            "to": tx["to"],
            "from": tx["from"],
            "value": tx["value"],
            "data": tx["input"],
        }

        try:
            # replay the transaction on the provider
            # NOTE(review): replaying against the state at blockNumber - 1
            # only approximates the pre-tx state — earlier transactions in the
            # same block are not applied. Confirm this is acceptable.
            self.api.eth.call(replay_tx, tx["blockNumber"] - 1)
        except SolidityError as e:
            # execution reverted exception
            return str(e)
        except HTTPError as e:
            # http exception
            raise e
        else:
            # given tx not reverted
            raise ValueError(f"The given transaction has not been reverted!\ntx: {tx}")

    def get_contract_instance(
        self, contract_interface: Dict[str, str], contract_address: Optional[str] = None
    ) -> Any:
        """
        Get the instance of a contract.

        :param contract_interface: the contract interface.
        :param contract_address: the contract address.
:return: the contract instance """ if contract_address is None: instance = self.api.eth.contract( abi=contract_interface[_ABI], bytecode=contract_interface[_BYTECODE], ) else: _contract_address = self.api.toChecksumAddress(contract_address) instance = self.api.eth.contract( address=_contract_address, abi=contract_interface[_ABI], bytecode=contract_interface[_BYTECODE], ) return instance def get_deploy_transaction( # pylint: disable=arguments-differ self, contract_interface: Dict[str, str], deployer_address: Address, value: int = 0, gas: Optional[int] = None, max_fee_per_gas: Optional[int] = None, max_priority_fee_per_gas: Optional[str] = None, gas_price: Optional[str] = None, gas_price_strategy: Optional[str] = None, gas_price_strategy_extra_config: Optional[Dict] = None, **kwargs: Any, ) -> Optional[JSONLike]: """ Get the transaction to deploy the smart contract. :param contract_interface: the contract interface. :param
PRIMITIVE representation. <BLANKLINE> alpha = the unique root of x^2 - 3 between 0 and 4 = 1.7320508076- <BLANKLINE> Coordinate 1 = 0 = 0.0000000000 Coordinate 2 = alpha = 1.7320508076- ---------------------------------------------------- We see that, the level of this cell is 2, meaning that it is part of the decomposition of $\RR^2$. The dimension is 1, meaning that the cell is homeomorphic to a line (rather than a plane or a point). The sample point gives the coordinates of one point in the cell, both symbolically and numerically. For programmatic access to cells, we've defined a \sage wrapper class \class{QepcadCell}. These cells can be created with the \method{cell} method; for example:: sage: c = qe.cell(3, 4); c # optional - qepcad QEPCAD cell (3, 4) A \class{QepcadCell} has accessor methods for the important state held within a cell. For instance:: sage: c.level() # optional - qepcad 2 sage: c.index() # optional - qepcad (3, 4) sage: qe.cell(3).number_of_children() # optional - qepcad 5 sage: len(qe.cell(3)) # optional - qepcad 5 One particularly useful thing we can get from a cell is its sample point, as \sage algebraic real numbers. :: sage: c.sample_point() # optional - qepcad (0, 1.732050807568878?) sage: c.sample_point_dict() # optional - qepcad {'y': 1.732050807568878?, 'x': 0} We've seen that we can get cells using the \method{cell} method. There are several QEPCAD commands that print lists of cells; we can also get cells using the \method{make_cells} method, passing it the output of one of these commands. :: sage: qe.make_cells(qe.d_true_cells()) # optional - qepcad [QEPCAD cell (4, 2), QEPCAD cell (3, 4), QEPCAD cell (3, 2), QEPCAD cell (2, 2)] Also, the cells in the stack over a given cell can be accessed using array subscripting or iteration. (Remember that cells in a stack are numbered starting with one; we preserve this convention in the array-subscripting syntax.) 
:: sage: c = qe.cell(3) # optional - qepcad sage: c[1] # optional - qepcad QEPCAD cell (3, 1) sage: [c2 for c2 in c] # optional - qepcad [QEPCAD cell (3, 1), QEPCAD cell (3, 2), QEPCAD cell (3, 3), QEPCAD cell (3, 4), QEPCAD cell (3, 5)] We can do one more thing with a cell: we can set its truth value. Once the truth values of the cells have been set, we can get QEPCAD to produce a formula which is true in exactly the cells we have selected. This is useful if QEPCAD's quantifier language is insufficient to express your problem. For example, consider again our combined figure of the circle and the ellipse. Suppose you want to find all vertical lines that intersect the circle twice, and also intersect the ellipse twice. The vertical lines that intersect the circle twice can be found by simplifying:: sage: F = qf.exactly_k(2, y, circle == 0); F # optional - qepcad (X2 y)[x^2 + y^2 - 3 = 0] and the vertical lines that intersect the ellipse twice are expressed by:: sage: G = qf.exactly_k(2, y, ellipse == 0); G # optional - qepcad (X2 y)[3 x^2 + 2 x y + y^2 - x + y - 7 = 0] and the lines that intersect both figures would be:: sage: qf.and_(F, G) # optional - qepcad Traceback (most recent call last): ... ValueError: QEPCAD formulas must be in prenex (quantifiers outermost) form ...except that QEPCAD does not support formulas like this; in QEPCAD input, all logical connectives must be inside all quantifiers. Instead, we can get QEPCAD to construct a CAD for our combined figure and set the truth values ourselves. (The exact formula we use doesn't matter, since we're going to replace the truth values in the cells; we just need to use a formula that uses both polynomials.) 
:: sage: qe = qepcad(qf.and_(ellipse == 0, circle == 0), interact=True) # optional - qepcad sage: qe.go(); qe.go(); qe.go() # optional - qepcad QEPCAD object has moved to phase 'Before Projection (y)' QEPCAD object has moved to phase 'Before Choice' QEPCAD object has moved to phase 'Before Solution' Now we want to find all cells $c$ in the decomposition of $\RR^1$ such that the stack over $c$ contains exactly two cells on the ellipse, and also contains exactly two cells on the circle. Our input polynomials are ``level-2 projection factors'', we see:: sage: qe.d_proj_factors() # optional - qepcad P_1,1 = fac(J_1,1) = fac(dis(A_2,1)) = 8 x^2 - 8 x - 29 P_1,2 = fac(J_1,2) = fac(dis(A_2,2)) = x^2 - 3 P_1,3 = fac(J_1,3) = fac(res(A_2,1|A_2,2)) = 8 x^4 - 26 x^2 - 4 x + 13 A_2,1 = input = y^2 + 2 x y + y + 3 x^2 - x - 7 A_2,2 = input = y^2 + x^2 - 3 so we can test whether a cell is on the ellipse by checking that the sign of the corresponding projection factor is 0 in our cell. For instance, the cell (12,2) is on the ellipse:: sage: qe.cell(12,2).signs()[1][0] # optional - qepcad 0 So we can update the truth values as desired like this:: sage: for c in qe.cell(): # optional - qepcad ... count_ellipse = 0 # optional - qepcad ... count_circle = 0 # optional - qepcad ... for c2 in c: # optional - qepcad ... count_ellipse += (c2.signs()[1][0] == 0) # optional - qepcad ... count_circle += (c2.signs()[1][1] == 0) # optional - qepcad ... c.set_truth(count_ellipse == 2 and count_circle == 2) # optional - qepcad and then we can get our desired solution formula. (The 'G' stands for 'geometric', and gives solutions using the same rules as \code{solution='geometric'} described above.) 
::

    sage: qe.solution_extension('G')             # optional - qepcad
    8 x^2 - 8 x - 29 < 0 /\ x^2 - 3 < 0

TESTS:

Check the qepcad configuration file::

    sage: from sage.misc.misc import SAGE_LOCAL
    sage: open('%s/default.qepcadrc'%SAGE_LOCAL).readlines()[-1]
    'SINGULAR .../local/bin\n'

AUTHORS:

- <NAME> (2008-03): initial version
"""

#*****************************************************************************
#       Copyright (C) 2008 <NAME> <<EMAIL>>
#
#  Distributed under the terms of the GNU General Public License (GPL)
#  as published by the Free Software Foundation; either version 2 of
#  the License, or (at your option) any later version.
#                  http://www.gnu.org/licenses/
#*****************************************************************************

import sage.misc.misc

import pexpect
import re
import sys

from sage.misc.flatten import flatten
from sage.misc.sage_eval import sage_eval
from sage.misc.preparser import implicit_mul

from expect import Expect, ExpectFunction, AsciiArtString


def _qepcad_cmd(memcells=None):
    r"""
    Construct a QEPCAD command line.

    Optionally set the number of memory cells to use.

    EXAMPLES::

        sage: from sage.interfaces.qepcad import _qepcad_cmd
        sage: from sage.misc.misc import SAGE_LOCAL
        sage: s = _qepcad_cmd()
        sage: s == 'env qe=%s qepcad '%SAGE_LOCAL
        True
        sage: s = _qepcad_cmd(memcells=8000000)
        sage: s == 'env qe=%s qepcad +N8000000'%SAGE_LOCAL
        True
    """
    # QEPCAD's "+N" flag sets the number of memory cells it allocates.
    if memcells is not None:
        memcells_arg = '+N%s' % memcells
    else:
        memcells_arg = ''
    # The "qe" environment variable tells QEPCAD where its data files live.
    return "env qe=%s qepcad %s"%(sage.misc.misc.SAGE_LOCAL, memcells_arg)

# Lazily-built map of QEPCAD command name -> (id, phases, kind, help text,
# special-handling tag); populated once by _update_command_info().
_command_info_cache = None


def _update_command_info():
    r"""
    Read the file \file{qepcad.help} to find the list of commands
    supported by QEPCAD.

    Used for tab-completion and documentation.

    EXAMPLES::

        sage: from sage.interfaces.qepcad import _update_command_info, _command_info_cache
        sage: _update_command_info()   # optional - qepcad
        sage: _command_info_cache['approx_precision']   # optional - qepcad
        ('46', 'abcde', 'm', 'approx-precision N\n\nApproximate algeraic numbers to N decimal places.\n', None)
    """
    global _command_info_cache
    if _command_info_cache is not None:
        return

    cache = {}

    # qepcad.help is a sequence of '@'-delimited records: a header line of
    # "cmd id phases kind", then free-form help text, terminated by '@@@'.
    with open('%s/bin/qepcad.help'%sage.misc.misc.SAGE_LOCAL) as help:
        assert(help.readline().strip() == '@')
        while True:
            cmd_line = help.readline()
            # skip blank lines between records
            while len(cmd_line.strip()) == 0:
                cmd_line = help.readline()
            cmd_line = cmd_line.strip()
            if cmd_line == '@@@':
                break
            (cmd, id, phases, kind) = cmd_line.split()
            assert(help.readline().strip() == '@')

            # accumulate the help text until the closing '@' delimiter
            help_text = ''
            help_line = help.readline()
            while help_line.strip() != '@':
                help_text += help_line
                help_line = help.readline()

            # I went through qepcad.help and picked out commands that
            # I thought might be worth a little extra tweaking...
            special = None

            # These commands have been tweaked.
            if cmd in ['d-all-cells-in-subtree', 'd-cell', 'd-pcad', 'd-pscad',
                       'd-stack', 'manual-choose-cell']:
                special = 'cell'
            if cmd in ['ipfzt', 'rational-sample', 'triv-convert', 'use-db',
                       'use-selected-cells-cond', 'verbose']:
                special = 'yn'

            # The tweaking for these commands has not been implemented yet.
            if cmd in ['selected-cells-cond']:
                special = 'formula'
            if cmd in ['ch-pivot', 'rem-pf', 'rem-pj']:
                special = 'i,j'
            if cmd in ['d-2d-cad', 'set-truth-value', 'solution-extension']:
                special = 'interactive'
            if cmd in ['p-2d-cad', 'trace-alg', 'trace-data']:
                special = 'optional'

            # hyphens become underscores so commands are valid Python names
            cmd = cmd.replace('-', '_')
            cache[cmd] = (id, phases, kind, help_text, special)

    _command_info_cache = cache

# QEPCAD does not have a typical "computer
with angle_between() (i.e. abs value of angles.) if 0.75 * np.pi < angle <= np.pi: feature[0] += 1 elif 0.25 * np.pi <= angle < 0.75 * np.pi: feature[1] += 1 elif 0.0 <= angle < 0.25 * np.pi: feature[2] += 1 else: raise ValueError( "Error in binning orientation. Orientation does not fit into any bin." ) return feature @njit def velocity_features( agent_position, agent_velocity, pedestrian_positions, pedestrian_velocities, lower_speed_threshold=0.015, upper_speed_threshold=0.025, ): """ Computes the velocity features described in Vasquez et. al's paper: "Learning to navigate through crowded environments". :param agent_position: position of the agent (robot) :type agent_position: 2d np.array or tuple :param agent_velocity: velocity of the agent (robot) :type agent_velocity: 2d np.array or tuple :param pedestrian_positions: positions of pedestrians. :type pedestrian_positions: 2d float np.array. :param lower_speed_threshold: Lower magnitude of speed threshold threshold used for binning. This is 0.015 in the paper. :type lower_threshold: float :param upper_speed_threshold: Higher magnitude of speed threshold threshold used for binning. This is 0.025 in the paper. :type upper_threshold: float :param pedestrian_velocities: velocities of pedestrians. :type pedestrian_velocities: 2d float np.array. :param lower_threshold: Lower magnitude of speed threshold threshold used for binning. This is 0.015 in the paper. :type lower_threshold: float :param upper_threshold: Higher magnitude of speed threshold threshold used for binning. This is 0.025 in the paper. :type upper_threshold: float :return: orientation feature vector. :rtype: float np.array of shape (3,) """ assert lower_speed_threshold < upper_speed_threshold feature = np.zeros((3, 3)) assert len(pedestrian_positions) == len(pedestrian_velocities) # used to group pedestrians with the same orientation bin together using # their ID. 
ped_sorted_by_orientation = [np.empty(0, dtype=np.int64)] * 3 for ped_id in range(len(pedestrian_positions)): relative_pos = agent_position - pedestrian_positions[ped_id] relative_vel = agent_velocity - pedestrian_velocities[ped_id] # angle_between produces only positive angles if (relative_pos == np.zeros(2)).all() or ( relative_vel == np.zeros(2) ).all(): # cannot calculate angle between zero vectors angle = 0.0 else: angle = angle_between(relative_pos, relative_vel) # put into bins # Bins adjusted to work with angle_between() (i.e. abs value of angles.) if 0.75 * np.pi < angle <= np.pi: ped_sorted_by_orientation[0] = np.append( ped_sorted_by_orientation[0], ped_id ) elif 0.25 * np.pi <= angle < 0.75 * np.pi: ped_sorted_by_orientation[1] = np.append( ped_sorted_by_orientation[1], ped_id ) elif 0.0 <= angle < 0.25 * np.pi: ped_sorted_by_orientation[2] = np.append( ped_sorted_by_orientation[2], ped_id ) else: raise ValueError("Orientation does not fit into any bin.") for idx, ped_ids in enumerate(ped_sorted_by_orientation): velocities = pedestrian_velocities[ped_ids] if not velocities.size: break else: mean_speeds = np.mean(np.abs(velocities)) # bin speeds if 0 <= mean_speeds < lower_speed_threshold: feature[idx, 0] = 1 elif lower_speed_threshold <= mean_speeds < upper_speed_threshold: feature[idx, 1] = 1 elif mean_speeds >= upper_speed_threshold: feature[idx, 2] = 1 else: raise ValueError("Average speed does not fit in any bins.") return feature.flatten() def social_force_features( agent_radius, agent_position, agent_velocity, pedestrian_positions ): """ Computes the social forces features described in Vasquez et. al's paper: "Learning to navigate through crowded environments". :param agent_radius: radius of agent(s) in the environment. Note: this is the radius of the agent's graphical circle, not a radius around the agent. :type agent_radius: float. 
:param agent_position: position of the agent (robot) :type agent_position: 2d np.array or tuple :param agent_velocity: velocity of the agent (robot) :type agent_velocity: 2d np.array or tuple :param pedestrian_positions: positions of pedestrians. :type pedestrian_positions: 2d float np.array. :param pedestrian_velocities: velocities of pedestrians. :type pedestrian_velocities: 2d float np.array. :return: orientation feature vector. :rtype: float np.array of shape (3,) """ # in the paper formula, 'i' is our agent, while 'j's are the pedestrians. rel_positions = pedestrian_positions - agent_position rel_distances = np.linalg.norm(rel_positions, axis=1) normalized_rel_positions = rel_positions / np.max(rel_distances) assert rel_positions.shape == normalized_rel_positions.shape rel_angles = np.zeros(rel_distances.shape) # used to group pedestrians with the same orientation bin together using # their ID. feature = np.zeros(3) ped_orientation_bins = [np.empty(0, dtype=np.int64)] * 3 for ped_id in range(len(pedestrian_positions)): relative_pos = rel_positions[ped_id] # angle_between produces only positive angles angle = angle_between(relative_pos, agent_velocity) rel_angles[ped_id] = angle # put into bins # Bins adjusted to work with angle_between() (i.e. abs value of angles.) 
if 0.75 * np.pi <= angle <= np.pi: ped_orientation_bins[0] = np.append( ped_orientation_bins[0], ped_id ) elif 0.25 * np.pi <= angle < 0.75 * np.pi: ped_orientation_bins[1] = np.append( ped_orientation_bins[1], ped_id ) elif 0.0 <= angle < 0.25 * np.pi: ped_orientation_bins[2] = np.append( ped_orientation_bins[2], ped_id ) else: raise ValueError("Orientation does not fit into any bin.") exp_multiplier = np.exp(2 * agent_radius - rel_distances).reshape(-1, 1) anisotropic_term = (2.0 - 0.5 * (1.0 + np.cos(rel_angles))).reshape(-1, 1) social_forces = ( exp_multiplier * normalized_rel_positions * anisotropic_term ) forces_above_threshold = np.linalg.norm(social_forces, axis=1) > 0.5 feature[0] = np.sum(forces_above_threshold[ped_orientation_bins[0]]) feature[1] = np.sum(forces_above_threshold[ped_orientation_bins[1]]) feature[2] = np.sum(forces_above_threshold[ped_orientation_bins[2]]) return feature @njit def angle_to_goal_features(goal_position, agent_position, agent_orientation): """ computes features based on the error in the agent's heading towards the goal. Error is the angle between agent heading vector and vector (goal_pos - agent_pos). The features are binary features based on where the angle fits in the bins [0-pi/8, pi/8-pi/4, pi/4-3/4pi, 3/4pi-pi]. This is meant to mimic the goal_rel_orientation function. :param goal_position: position of the goal. :type goal_position: 2d numpy vector. :param agent_position: position of agent. :type agent_position: 2d numpy vector. :param agent_orientation: orientation vector of agent. :type agent_orientation: 2d numpy vector. :raises ValueError: If angle does not fit in the [0,pi] interval, something unexpected has happened. :return: feature vector representing binned angles. 
:rtype: float np.array """ features = np.zeros(4) vector_to_goal = goal_position - agent_position angle = angle_between(agent_orientation, vector_to_goal) # bin in angle bins if 0.0 <= angle < 0.125 * np.pi: features[0] = 1.0 elif 0.125 * np.pi <= angle < 0.25 * np.pi: features[1] = 1.0 elif 0.25 * np.pi <= angle < 0.75 * np.pi: features[2] = 1.0 elif 0.75 * np.pi <= angle <= np.pi: features[3] = 1.0 else: raise ValueError("Cannot bin angle in [0,pi] interval.") return features @njit def vector_to_goal_features(goal_position, agent_position, agent_orientation): features = np.zeros(8) vector_to_goal = goal_position - agent_position angle = total_angle_between(agent_orientation, vector_to_goal) # mimic finding closest relative vector by binning angle if -0.125 * np.pi <= angle < 0.125 * np.pi: features[0] = 1.0 elif 0.125 * np.pi <= angle < 0.375 * np.pi: features[1] = 1.0 elif 0.375 * np.pi <= angle < 0.625 * np.pi: features[2] = 1.0 elif 0.625 * np.pi <= angle < 0.875 * np.pi: features[3] = 1.0 elif 0.875 * np.pi <= angle <= np.pi: features[4] = 1.0 elif -np.pi <= angle < -0.875 * np.pi: features[4] = 1.0 elif -0.875 * np.pi <= angle < -0.625 * np.pi: features[5] = 1.0 elif -0.625 * np.pi <= angle < -0.375 * np.pi: features[6] = 1.0 elif -0.375 * np.pi <= angle < -0.125 * np.pi: features[7] = 1.0 else: raise ValueError("Faled to bin angles in [-pi, pi] range.") return features @njit def orientation_change_features(new_orientation, old_orientation): thresholds = np.array( [0, np.pi / 9, 2 * np.pi / 9, np.pi * 3 / 9, 4 * np.pi / 9] ) if old_orientation is None: print("Warning: old orientation is none, assuming old=new.") orientation_change = 0.0 else: orientation_change = angle_between(new_orientation, old_orientation) # bin based on thresholds features = np.zeros(5) index = np.argmin(np.abs(orientation_change - thresholds)) features[index] = 1.0 return features @njit def SAM_features( agent_position, agent_velocity, pedestrian_positions, pedestrian_velocities, 
inner_radius, outer_radius, lower_speed_threshold, upper_speed_threshold, ): """ Calculates entire sam features based on Fahad et. al's 2018 paper: "Learning How Pedestrians Navigate: A Deep Inverse Reinforcement Learning Approach" :param agent_position: Position of the agent. :type agent_position: 2d numpy float array. :param agent_velocity: Agent velocity. :type agent_velocity: 2d numpy float array. :param pedestrian_positions: Px2 vector of the position of all pedestrians. :type pedestrian_positions: Px2 numpy float array where P is the number of pedestrians. :param pedestrian_velocities: Px2 vector of the velocity of all pedestrians. :type pedestrian_velocities: Px2 numpy float array where P is the number of pedestrians. :param inner_radius: Radius of inner circle of feature extractor. :type inner_radius: float. :param outer_radius: Radius of outer circle of feature extractor. :type outer_radius: float. :param lower_speed_threshold: lower binning threshold for speed. :type lower_speed_threshold: float. :param upper_speed_threshold: upper binning threshold for speed. :type upper_speed_threshold: float. :return: tuple (SAM_features, density) where SAM_features are the features and density is total
    # --- fragment: continuation of an update test whose start precedes this
    # --- chunk (the assignment target of the first line is not visible here).
    = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'xyz'}}
        tmpl_diff = {'Properties': {'Foo': 'xyz'}}
        prop_diff = {'Foo': 'xyz'}
        self.m.StubOutWithMock(generic_rsrc.ResourceWithProps, 'handle_update')
        generic_rsrc.ResourceWithProps.handle_update(
            utmpl, tmpl_diff, prop_diff).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(res.update, utmpl)()
        self.assertEqual((res.UPDATE, res.COMPLETE), res.state)
        self.m.VerifyAll()

    def test_update_replace(self):
        # handle_update raising UpdateReplace must propagate to the caller.
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
        res.update_allowed_keys = ('Properties',)
        res.update_allowed_properties = ('Foo',)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

        utmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'xyz'}}
        self.m.StubOutWithMock(generic_rsrc.ResourceWithProps, 'handle_update')
        tmpl_diff = {'Properties': {'Foo': 'xyz'}}
        prop_diff = {'Foo': 'xyz'}
        generic_rsrc.ResourceWithProps.handle_update(
            utmpl, tmpl_diff, prop_diff).AndRaise(resource.UpdateReplace())
        self.m.ReplayAll()
        # should be re-raised so parser.Stack can handle replacement
        updater = scheduler.TaskRunner(res.update, utmpl)
        self.assertRaises(resource.UpdateReplace, updater)
        self.m.VerifyAll()

    def test_update_fail_missing_req_prop(self):
        # Removing a required property from the update template must fail.
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithRequiredProps('test_resource',
                                                     tmpl, self.stack)
        res.update_allowed_keys = ('Properties',)
        res.update_allowed_properties = ('Foo',)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

        utmpl = {'Type': 'GenericResourceType', 'Properties': {}}

        updater = scheduler.TaskRunner(res.update, utmpl)
        self.assertRaises(exception.ResourceFailure, updater)
        self.assertEqual((res.UPDATE, res.FAILED), res.state)

    def test_update_fail_prop_typo(self):
        # An unknown property name ('Food') in the update template must fail.
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
        res.update_allowed_keys = ('Properties',)
        res.update_allowed_properties = ('Foo',)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

        utmpl = {'Type': 'GenericResourceType', 'Properties': {'Food': 'xyz'}}

        updater = scheduler.TaskRunner(res.update, utmpl)
        self.assertRaises(exception.ResourceFailure, updater)
        self.assertEqual((res.UPDATE, res.FAILED), res.state)

    def test_update_not_implemented(self):
        # handle_update raising NotImplemented is reported as a failure.
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
        res.update_allowed_keys = ('Properties',)
        res.update_allowed_properties = ('Foo',)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

        utmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'xyz'}}
        tmpl_diff = {'Properties': {'Foo': 'xyz'}}
        prop_diff = {'Foo': 'xyz'}
        self.m.StubOutWithMock(generic_rsrc.ResourceWithProps, 'handle_update')
        generic_rsrc.ResourceWithProps.handle_update(
            utmpl, tmpl_diff, prop_diff).AndRaise(NotImplemented)
        self.m.ReplayAll()
        updater = scheduler.TaskRunner(res.update, utmpl)
        self.assertRaises(exception.ResourceFailure, updater)
        self.assertEqual((res.UPDATE, res.FAILED), res.state)
        self.m.VerifyAll()

    def test_suspend_resume_ok(self):
        # Happy path: create -> suspend -> resume, checking state after each.
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
        res.update_allowed_keys = ('Properties',)
        res.update_allowed_properties = ('Foo',)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)
        scheduler.TaskRunner(res.suspend)()
        self.assertEqual((res.SUSPEND, res.COMPLETE), res.state)
        scheduler.TaskRunner(res.resume)()
        self.assertEqual((res.RESUME, res.COMPLETE), res.state)

    def test_suspend_fail_inprogress(self):
        # Suspend must fail while any action is IN_PROGRESS.
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

        res.state_set(res.CREATE, res.IN_PROGRESS)
        suspend = scheduler.TaskRunner(res.suspend)
        self.assertRaises(exception.ResourceFailure, suspend)

        res.state_set(res.UPDATE, res.IN_PROGRESS)
        suspend = scheduler.TaskRunner(res.suspend)
        self.assertRaises(exception.ResourceFailure, suspend)

        res.state_set(res.DELETE, res.IN_PROGRESS)
        suspend = scheduler.TaskRunner(res.suspend)
        self.assertRaises(exception.ResourceFailure, suspend)

    def test_resume_fail_not_suspend_complete(self):
        # Resume is only valid from (SUSPEND, COMPLETE); every other
        # action/status combination must fail.
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

        non_suspended_states = [s for s in
                                itertools.product(res.ACTIONS, res.STATUSES)
                                if s != (res.SUSPEND, res.COMPLETE)]
        for state in non_suspended_states:
            res.state_set(*state)
            resume = scheduler.TaskRunner(res.resume)
            self.assertRaises(exception.ResourceFailure, resume)

    def test_suspend_fail_exception(self):
        # An exception from handle_suspend leaves the resource (SUSPEND, FAILED).
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

        self.m.StubOutWithMock(generic_rsrc.ResourceWithProps,
                               'handle_suspend')
        generic_rsrc.ResourceWithProps.handle_suspend().AndRaise(Exception())
        self.m.ReplayAll()

        suspend = scheduler.TaskRunner(res.suspend)
        self.assertRaises(exception.ResourceFailure, suspend)
        self.assertEqual((res.SUSPEND, res.FAILED), res.state)

    def test_resume_fail_exception(self):
        # An exception from handle_resume leaves the resource (RESUME, FAILED).
        tmpl = {'Type': 'GenericResourceType', 'Properties': {'Foo': 'abc'}}
        res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
        scheduler.TaskRunner(res.create)()
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

        self.m.StubOutWithMock(generic_rsrc.ResourceWithProps, 'handle_resume')
        generic_rsrc.ResourceWithProps.handle_resume().AndRaise(Exception())
        self.m.ReplayAll()

        res.state_set(res.SUSPEND, res.COMPLETE)
        resume = scheduler.TaskRunner(res.resume)
        self.assertRaises(exception.ResourceFailure, resume)
        self.assertEqual((res.RESUME, res.FAILED), res.state)

    def test_resource_class_to_template(self):
        # Round-trip: a resource class's schemas are converted into a
        # provider template (parameters, resource, outputs).

        class TestResource(resource.Resource):
            list_schema = {'wont_show_up': {'Type': 'Number'}}
            map_schema = {'will_show_up': {'Type': 'Integer'}}

            properties_schema = {
                'name': {'Type': 'String'},
                'bool': {'Type': 'Boolean'},
                'implemented': {'Type': 'String',
                                'Implemented': True,
                                'AllowedPattern': '.*',
                                'MaxLength': 7,
                                'MinLength': 2,
                                'Required': True},
                'not_implemented': {'Type': 'String',
                                    'Implemented': False},
                'number': {'Type': 'Number',
                           'MaxValue': 77,
                           'MinValue': 41,
                           'Default': 42},
                'list': {'Type': 'List', 'Schema': {'Type': 'Map',
                         'Schema': list_schema}},
                'map': {'Type': 'Map', 'Schema': map_schema},
            }

            attributes_schema = {
                'output1': 'output1_desc',
                'output2': 'output2_desc'
            }

        expected_template = {
            'HeatTemplateFormatVersion': '2012-12-12',
            'Parameters': {
                'name': {'Type': 'String'},
                'bool': {'Type': 'String',
                         'AllowedValues': ['True', 'true', 'False', 'false']},
                'implemented': {
                    'Type': 'String',
                    'AllowedPattern': '.*',
                    'MaxLength': 7,
                    'MinLength': 2
                },
                'number': {'Type': 'Number',
                           'MaxValue': 77,
                           'MinValue': 41,
                           'Default': 42},
                'list': {'Type': 'CommaDelimitedList'},
                'map': {'Type': 'Json'}
            },
            'Resources': {
                'TestResource': {
                    'Type': 'Test::Resource::resource',
                    'Properties': {
                        'name': {'Ref': 'name'},
                        'bool': {'Ref': 'bool'},
                        'implemented': {'Ref': 'implemented'},
                        'number': {'Ref': 'number'},
                        'list': {'Fn::Split': [",", {'Ref': 'list'}]},
                        'map': {'Ref': 'map'}
                    }
                }
            },
            'Outputs': {
                'output1': {
                    'Description': 'output1_desc',
                    'Value': '{"Fn::GetAtt": ["TestResource", "output1"]}'
                },
                'output2': {
                    'Description': 'output2_desc',
                    'Value': '{"Fn::GetAtt": ["TestResource", "output2"]}'
                }
            }
        }
        self.assertEqual(expected_template,
                         TestResource.resource_to_template(
                             'Test::Resource::resource'))


class ResourceAdoptTest(HeatTestCase):
    # Tests for adopting pre-existing resources into a stack.
    def setUp(self):
        super(ResourceAdoptTest, self).setUp()
        utils.setup_dummy_db()
        resource._register_class('GenericResourceType',
                                 generic_rsrc.GenericResource)

    def test_adopt_resource_success(self):
        # Adopt with minimal resource data succeeds and clears metadata.
        adopt_data = '{}'
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
            }
        })
        self.stack = parser.Stack(utils.dummy_context(), 'test_stack',
                                  tmpl,
                                  stack_id=str(uuid.uuid4()),
                                  adopt_stack_data=json.loads(adopt_data))
        res = self.stack['foo']
        res_data = {
            "status": "COMPLETE",
            "name": "foo",
            "resource_data": {},
            "metadata": {},
            "resource_id": "test-res-id",
            "action": "CREATE",
            "type": "GenericResourceType"
        }
        adopt = scheduler.TaskRunner(res.adopt, res_data)
        adopt()
        self.assertEqual({}, res.metadata)
        self.assertEqual((res.ADOPT, res.COMPLETE), res.state)

    def test_adopt_with_resource_data_and_metadata(self):
        # Adopt persists provided resource_data and metadata.
        adopt_data = '{}'
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
            }
        })
        self.stack = parser.Stack(utils.dummy_context(), 'test_stack',
                                  tmpl,
                                  stack_id=str(uuid.uuid4()),
                                  adopt_stack_data=json.loads(adopt_data))
        res = self.stack['foo']
        res_data = {
            "status": "COMPLETE",
            "name": "foo",
            "resource_data": {"test-key": "test-value"},
            "metadata": {"os_distro": "test-distro"},
            "resource_id": "test-res-id",
            "action": "CREATE",
            "type": "GenericResourceType"
        }
        adopt = scheduler.TaskRunner(res.adopt, res_data)
        adopt()
        self.assertEqual("test-value",
                         db_api.resource_data_get(res, "test-key"))
        self.assertEqual({"os_distro": "test-distro"}, res.metadata)
        self.assertEqual((res.ADOPT, res.COMPLETE), res.state)

    def test_adopt_resource_missing(self):
        # Adopt with no resource data must fail with a clear reason.
        adopt_data = '''{
                        "action": "CREATE",
                        "status": "COMPLETE",
                        "name": "my-test-stack-name",
                        "resources": {}
                        }'''
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
            }
        })
        self.stack = parser.Stack(utils.dummy_context(), 'test_stack',
                                  tmpl,
                                  stack_id=str(uuid.uuid4()),
                                  adopt_stack_data=json.loads(adopt_data))
        res = self.stack['foo']
        adopt = scheduler.TaskRunner(res.adopt, None)
        self.assertRaises(exception.ResourceFailure, adopt)
        expected = 'Exception: Resource ID was not provided.'
        self.assertEqual(expected, res.status_reason)


class ResourceDependenciesTest(HeatTestCase):
    # Tests that Ref/GetAtt (and HOT get_resource/get_attr) references
    # produce edges in the stack dependency graph.
    def setUp(self):
        super(ResourceDependenciesTest, self).setUp()
        utils.setup_dummy_db()

        resource._register_class('GenericResourceType',
                                 generic_rsrc.GenericResource)
        resource._register_class('ResourceWithPropsType',
                                 generic_rsrc.ResourceWithProps)

        self.deps = dependencies.Dependencies()

    def test_no_deps(self):
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['foo']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)

    def test_ref(self):
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
                'bar': {
                    'Type': 'ResourceWithPropsType',
                    'Properties': {
                        'Foo': {'Ref': 'foo'},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_hot_ref(self):
        '''Test that HOT get_resource creates dependencies.'''
        tmpl = template.Template({
            'heat_template_version': '2013-05-23',
            'resources': {
                'foo': {'type': 'GenericResourceType'},
                'bar': {
                    'type': 'ResourceWithPropsType',
                    'properties': {
                        'Foo': {'get_resource': 'foo'},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_ref_nested_dict(self):
        # A Ref buried inside another intrinsic function still counts.
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
                'bar': {
                    'Type': 'ResourceWithPropsType',
                    'Properties': {
                        'Foo': {'Fn::Base64': {'Ref': 'foo'}},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_hot_ref_nested_dict(self):
        tmpl = template.Template({
            'heat_template_version': '2013-05-23',
            'resources': {
                'foo': {'type': 'GenericResourceType'},
                'bar': {
                    'type': 'ResourceWithPropsType',
                    'properties': {
                        'Foo': {'Fn::Base64': {'get_resource': 'foo'}},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_ref_nested_deep(self):
        # A Ref nested inside a list argument of Fn::Join still counts.
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
                'bar': {
                    'Type': 'ResourceWithPropsType',
                    'Properties': {
                        'Foo': {'Fn::Join': [",", ["blarg",
                                                   {'Ref': 'foo'},
                                                   "wibble"]]},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_hot_ref_nested_deep(self):
        tmpl = template.Template({
            'heat_template_version': '2013-05-23',
            'resources': {
                'foo': {'type': 'GenericResourceType'},
                'bar': {
                    'type': 'ResourceWithPropsType',
                    'properties': {
                        'foo': {'Fn::Join': [",", ["blarg",
                                                   {'get_resource': 'foo'},
                                                   "wibble"]]},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_ref_fail(self):
        # A Ref to a nonexistent resource raises InvalidTemplateReference.
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
                'bar': {
                    'Type': 'ResourceWithPropsType',
                    'Properties': {
                        'Foo': {'Ref': 'baz'},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)
        ex = self.assertRaises(exception.InvalidTemplateReference,
                               getattr, stack, 'dependencies')
        self.assertIn('"baz" (in bar.Properties.Foo)', str(ex))

    def test_hot_ref_fail(self):
        tmpl = template.Template({
            'heat_template_version': '2013-05-23',
            'resources': {
                'foo': {'type': 'GenericResourceType'},
                'bar': {
                    'type': 'ResourceWithPropsType',
                    'properties': {
                        'Foo': {'get_resource': 'baz'},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)
        ex = self.assertRaises(exception.InvalidTemplateReference,
                               getattr, stack, 'dependencies')
        self.assertIn('"baz" (in bar.Properties.Foo)', str(ex))

    def test_getatt(self):
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
                'bar': {
                    'Type': 'ResourceWithPropsType',
                    'Properties': {
                        'Foo': {'Fn::GetAtt': ['foo', 'bar']},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_hot_getatt(self):
        tmpl = template.Template({
            'heat_template_version': '2013-05-23',
            'resources': {
                'foo': {'type': 'GenericResourceType'},
                'bar': {
                    'type': 'ResourceWithPropsType',
                    'properties': {
                        'Foo': {'get_attr': ['foo', 'bar']},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_getatt_nested_dict(self):
        tmpl = template.Template({
            'Resources': {
                'foo': {'Type': 'GenericResourceType'},
                'bar': {
                    'Type': 'ResourceWithPropsType',
                    'Properties': {
                        'Foo': {'Fn::Base64': {'Fn::GetAtt': ['foo', 'bar']}},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res, graph)
        self.assertIn(stack['foo'], graph[res])

    def test_hot_getatt_nested_dict(self):
        # --- fragment: this method is cut off at the end of the visible chunk.
        tmpl = template.Template({
            'heat_template_version': '2013-05-23',
            'resources': {
                'foo': {'type': 'GenericResourceType'},
                'bar': {
                    'type': 'ResourceWithPropsType',
                    'properties': {
                        'Foo': {'Fn::Base64': {'get_attr': ['foo', 'bar']}},
                    }
                }
            }
        })
        stack = parser.Stack(utils.dummy_context(), 'test', tmpl)

        res = stack['bar']
        res.add_dependencies(self.deps)
        graph = self.deps.graph()

        self.assertIn(res,
# <gh_stars>0  -- repository-hosting artifact; commented out so the module
# parses (it was bare text at the top of the file).
import argparse

import numpy as np
import matplotlib.pyplot as plt
import os
import pickle

import torch
from torch import optim, nn

from utils.batchutils import make_batch_point, make_batch_line
from utils.batchutils import make_target_point, make_target_line
from utils.loss import reconstruction_loss, kullback_leibler_loss
from utils.models import EncoderRNN, DecoderRNN
# from utils.models_line import EncoderRNN_line, DecoderRNN_line
from utils.sampleutils import sample_bivariate_normal, sample_univariate_normal
from utils.visutils import make_image, make_image_seq, plot_sketch
from utils.dataset.datautils import purify, normalize_strokes


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Sketch-RNN training Training')
    parser.add_argument('--parametrization', default='point',
                        choices=['point', 'line'],
                        help='Parametrization of the drawing (line or point)')
    parser.add_argument('--train_data', default='data/broccoli_car_cat.npz',
                        help='Numpy zip containing the train data')
    parser.add_argument('--plot_random_train_data', action='store_true',
                        help='plot a randomly chosen train data')
    parser.add_argument('--experiment', default='uncondition',
                        choices=['uncondition', 'complete'],
                        help='try uncondition of to complete generation')
    parser.add_argument('--sigma', default=1,
                        help='variance when generating a point')
    args = parser.parse_args()

    # NOTE(review): classes below reference this module-level flag at
    # runtime; confirm the intended scope when the module is imported
    # rather than run as a script.
    use_cuda = torch.cuda.is_available()
    # parametrization = args.parametrization


class HParams():
    """Container for Sketch-RNN training hyper-parameters."""

    def __init__(self):
        self.enc_hidden_size = 256    # encoder RNN hidden size
        self.dec_hidden_size = 512    # decoder RNN hidden size
        self.Nz = 128                 # latent vector dimension
        self.M = 20                   # number of mixture components
        self.dropout = 0.1
        self.batch_size = 100
        self.eta_min = 0.01           # KL annealing start
        self.R = 0.99995              # KL annealing rate
        self.KL_min = 0.2
        self.wKL = 0.5                # KL loss weight
        self.lr = 0.001
        self.lr_decay = 0.9999
        self.min_lr = 0.00001
        self.grad_clip = 1.
        self.temperature = 0.4
        self.max_seq_length = 200
        # if parametrization == 'line':
        #     self.Mr = 10   # nbr mixture gaussian for radius
        #     self.Mphi = 10  # nbr mixture gaussian for angle


# hp = HParams()
# hp.use_cuda = use_cuda


class DataLoader():
    """Loads a QuickDraw-style .npz dataset and serves training batches.

    # TODO: Only doing it for point
    """

    def __init__(self, path_data, hp):
        '''
        loc_train_data : the path to the data
        hp : an instance of HParams
        '''
        # dataset = np.load(args.train_data, encoding='latin1')
        dataset = np.load(path_data, encoding='latin1',
                          allow_pickle=True,)
        # TODO: beware on the allow pickle
        self.path_data = path_data
        self.data = dataset['train']
        self.valid_set = dataset['valid']
        self.test_set = dataset['test']  # TODO: unused for now

        # preprocess the data
        self.data = purify(self.data, hp)
        self.data = normalize_strokes(self.data)
        self.max_len_out = max([len(seq) for seq in self.data])

        # preprocessing the valid data
        self.valid_set = purify(self.valid_set, hp)
        self.valid_set = normalize_strokes(self.valid_set)
        self.max_len_out_val = max([len(seq) for seq in self.valid_set])

    def make_batch(self, nbr_datum, use_cuda, parametrization='point',
                   type_set='train'):
        """Randomly sample `nbr_datum` sequences and build a padded batch."""
        # TODO: adapt make_batch to another parametrization
        # randomly selecting nbr_datum datum among the data set
        if type_set not in ['train', 'valid', 'test']:
            raise ValueError('this type of dataset does not exist')
        if type_set == 'train':
            # BUGFIX: was replace='False' — the non-empty STRING is truthy,
            # so sampling happened WITH replacement. Use the boolean.
            l_idx = np.random.choice(len(self.data), nbr_datum,
                                     replace=False)
            batch_sequences = [self.data[i] for i in l_idx]
        # BUGFIX: was `elif type == 'valid':`, comparing the BUILTIN `type`
        # (never equal to 'valid'), so requesting the validation set always
        # fell through to the error below.
        elif type_set == 'valid':
            l_idx = np.random.choice(len(self.valid_set), nbr_datum)
            batch_sequences = [self.valid_set[i] for i in l_idx]
        else:
            raise ValueError('no use for now of test')
        return(make_batch_point(batch_sequences, self.max_len_out, use_cuda))

    # TODO: something that read directly an input Image and process
    # it for the network.

    def select_batch(self, l_idx_datum, use_cuda, type_set='train'):
        """Build a batch from explicitly chosen indices (after validation)."""
        # selecting specific idx
        # check that idx are valid
        if not all(i >= 0 for i in l_idx_datum):
            raise ValueError('some index in l_idx_datum is negative')
        if type_set == 'train':
            if not all(i < len(self.data) for i in l_idx_datum):
                raise ValueError('some index in l_idx_datum is to big')
            batch_sequences = [self.data[i] for i in l_idx_datum]
        elif type_set == 'valid':
            if not all(i < len(self.valid_set) for i in l_idx_datum):
                raise ValueError('some index in l_idx_datum is to big')
            # BUGFIX: was `self.data[i]` — indexed the TRAIN set even though
            # the bounds check above was done against the validation set.
            batch_sequences = [self.valid_set[i] for i in l_idx_datum]
        else:
            raise ValueError('for now no use of test')
        # TODO: change self.max_len_out
        return(make_batch_point(batch_sequences, self.max_len_out, use_cuda))

    def plot_image(self, idx, plot=True):
        """Render training drawing `idx` (offsets -> absolute coordinates)."""
        off_seq = self.data[idx]

        def make_seq(seq_x, seq_y, seq_z):
            # transform the lists in the right array:
            # cumulative sums turn per-step offsets into absolute positions.
            x_sample = np.cumsum(seq_x, 0)
            y_sample = np.cumsum(seq_y, 0)
            z_sample = np.array(seq_z)
            sequence_coo = np.stack([x_sample, y_sample, z_sample]).T
            sequence_offset = np.stack(
                [np.array(seq_x), np.array(seq_y), np.array(z_sample)]).T
            return(sequence_coo, sequence_offset)

        seq, _ = make_seq(off_seq[:, 0], off_seq[:, 1], off_seq[:, 2])
        # TODO: still not sure of what is off_seq
        make_image(seq, dest_folder=None, name='_output_', plot=plot)

    def initialize(self, parametrization, use_cuda, batch_size):
        """Return the start-of-sequence token batch for the parametrization."""
        if parametrization == 'point':
            # create start of sequence:
            if use_cuda:
                sos = torch.stack(
                    [torch.Tensor([0, 0, 1, 0, 0])]
                    * batch_size).cuda().unsqueeze(0)
            else:
                sos = torch.stack(
                    [torch.Tensor([0, 0, 1, 0, 0])]
                    * batch_size).unsqueeze(0)
        elif parametrization == 'line':
            if use_cuda:
                sos = torch.stack(
                    [torch.Tensor([0, 0, 0, 0, 0])]
                    * batch_size).cuda().unsqueeze(0)
            else:
                sos = torch.stack(
                    [torch.Tensor([0, 0, 0, 0, 0])]
                    * batch_size).unsqueeze(0)
        return(sos)


def lr_decay(optimizer, hp):
    """Decay learning rate by a factor of lr_decay (floored at hp.min_lr)."""
    for param_group in optimizer.param_groups:
        if param_group['lr'] > hp.min_lr:
            param_group['lr'] *= hp.lr_decay
    return optimizer


class Model():
    """Sketch-RNN VAE: encoder + autoregressive decoder and its training loop.

    NOTE: this class is truncated at the end of the visible chunk; its
    remaining methods continue beyond this file section.
    """

    def __init__(self, hyper_parameters, parametrization='point'):
        self.parametrization = parametrization
        self.hyper_params = hyper_parameters
        if self.parametrization == 'point':
            # (dx, dy, p1, p2, p3) stroke-5 format
            self.hyper_params.size_parametrization = 5
            # TODO: add to the case of any parametrization
            # NOTE(review): hyper_params.max_len_out is not set by HParams
            # itself — confirm it is assigned before Model() is built.
            self.encoder = EncoderRNN(self.hyper_params)
            self.decoder = DecoderRNN(self.hyper_params,
                                      max_len_out=self.hyper_params.max_len_out)
        elif self.parametrization == 'line':
            # import pdb; pdb.set_trace()
            self.encoder = EncoderRNN_line(self.hyper_params)
            self.decoder = DecoderRNN_line(
                self.hyper_params,
                max_len_out=self.hyper_params.max_len_out)
        if use_cuda:
            self.encoder = self.encoder.cuda()
            self.decoder = self.decoder.cuda()
        self.encoder_optimizer = optim.Adam(self.encoder.parameters(),
                                            self.hyper_params.lr)
        self.decoder_optimizer = optim.Adam(self.decoder.parameters(),
                                            self.hyper_params.lr)
        self.eta_step = self.hyper_params.eta_min
        # keep track of loss value
        self.loss_train = []
        self.loss_valid = []

    def train(self, epoch, dataloader):
        '''
        #TODO : fill
        '''
        if not hasattr(self, 'dataloader'):
            self.dataloader = dataloader
        self.encoder.train()
        self.decoder.train()

        # prepare batch
        batch, lengths = dataloader.make_batch(
            self.hyper_params.batch_size, use_cuda,
            parametrization=self.parametrization)

        # encode:
        z, self.mu, self.sigma = self.encoder(batch,
                                              self.hyper_params.batch_size)
        # TODO: replace by 'point' by self.parametrisation
        # TODO(review, translated): self.parametrization should perhaps
        # live in the dataloader???
sos = dataloader.initialize('point', use_cuda, self.hyper_params.batch_size) batch_init = torch.cat([sos, batch], 0) z_stack = torch.stack([z]*(self.hyper_params.max_len_out+1)) # inputs is concatenation of z and batch_inputs inputs = torch.cat([batch_init, z_stack], 2) # decode: if self.parametrization == 'point': # import pdb; pdb.set_trace() self.pi, self.mu_x, self.mu_y, self.sigma_x, self.sigma_y, \ self.rho_xy, self.q, _, _ = self.decoder(inputs, z) elif self.parametrization == 'line': self.pi, self.mu_x, self.mu_y, self.sigma_x, self.sigma_y, \ self.rho_xy, self.pi_r, self.mu_r, self.sigma_r, self.pi_phi, self.mu_phi \ , self.sigma_phi, self.q0, _, _ = self.decoder(inputs, z) # prepare targets: # TODO: create fct make_target that takes parametrization as variable if self.parametrization == 'point': mask, dx, dy, p = make_target_point(batch, lengths, self.hyper_params, self.hyper_params.max_len_out, use_cuda) elif self.parametrization == 'line': # TODO is it batch of batch_ini that we xant to give? 
mask, dx, dy, r, phi, p0 = make_target_line(batch, lengths, self.hyper_params, self.hyper_params.max_len_out, use_cuda) if dx.shape[:2] != self.mu_x.shape[:2]: print(dx.shape[:2]) print(self.mu_x.shape[:2]) raise ValueError('batch et target batch output tensor not having\ same shape') # prepare optimizers: self.encoder_optimizer.zero_grad() self.decoder_optimizer.zero_grad() # update eta for LKL: self.eta_step = 1-(1-self.hyper_params.eta_min)*self.hyper_params.R # compute losses: LKL = kullback_leibler_loss(self, use_cuda, self.hyper_params.batch_size) if self.parametrization == 'point': param_info = (mask, dx, dy, p) elif self.parametrization == 'line': param_info = (mask, dx, dy, r, phi, p0) LR = reconstruction_loss(self, param_info, self.hyper_params.max_len_out, self.parametrization) loss = LR + LKL self.size_checkpoint = 1000 # TODO: add checkpoint as an argument # gradient step loss.backward() # gradient cliping nn.utils.clip_grad_norm_(self.encoder.parameters(), self.hyper_params.grad_clip) nn.utils.clip_grad_norm_(self.decoder.parameters(), self.hyper_params.grad_clip) # optim step self.encoder_optimizer.step() self.decoder_optimizer.step() # some print and save: if epoch % 100 == 0: # before the lr_decay was every epoch print('epoch', epoch, 'loss', loss.item(), 'LR', LR.item(), 'LKL', LKL.item()) self.encoder_optimizer = lr_decay(self.encoder_optimizer, self.hyper_params) self.decoder_optimizer = lr_decay(self.decoder_optimizer, self.hyper_params) if epoch % self.size_checkpoint == 0 and epoch != 0: self.save(epoch) # Beware here I added the .item() self.loss_train.append(loss.item()) self.loss_valid.append(self.compute_loss_valid().item()) if self.parametrization == 'point': self.conditional_generation_point() elif self.parametrization == 'line': self.conditional_generation_line() def compute_loss_valid(self) -> None: '''compute loss of validation set''' # TODO: what should we do of valid_set? 
What about max_len_out # batch, lengths = make_batch_point(valid_set, # max_len_out_val, use_cuda) valid_set = self.dataloader.valid_set max_len_out_val = self.dataloader.max_len_out_val batch, lengths = make_batch_point(valid_set, self.dataloader.max_len_out_val, use_cuda) # should remove dropouts: self.encoder.train(False) self.decoder.train(False) # encode: z, self.mu, self.sigma = self.encoder(batch, len(valid_set)) # TODO: print the sum of self.mu and self.sigma on the # create start of sequence: if use_cuda: sos = torch.stack([torch.Tensor([0, 0, 1, 0, 0])] * len(valid_set)).cuda().unsqueeze(0) else: sos = torch.stack([torch.Tensor([0, 0, 1, 0, 0])] * len(valid_set)).unsqueeze(0) # had sos at the begining of the batch: batch_init = torch.cat([sos, batch], 0) # expend z to be ready to concatenate with inputs z_stack = torch.stack([z]*(max_len_out_val + 1)) # TODO: to complete from here... inputs = torch.cat([batch_init, z_stack], 2) self.pi, self.mu_x, self.mu_y, self.sigma_x, self.sigma_y, \ self.rho_xy, self.q, _, _ = self.decoder(inputs, z) mask, dx, dy, p = make_target_point(batch, lengths, self.hyper_params, max_len_out_val, use_cuda) LKL = kullback_leibler_loss(self, use_cuda, len(valid_set), annealing=False) param_info = (mask, dx, dy, p) LR = reconstruction_loss(self, param_info, self.hyper_params.max_len_out, self.parametrization) loss = LR + LKL return loss def bivariate_normal_pdf(self, dx, dy): z_x = ((dx-self.mu_x)/self.sigma_x)**2 z_y = ((dy-self.mu_y)/self.sigma_y)**2 z_xy = (dx-self.mu_x)*(dy-self.mu_y)/(self.sigma_x*self.sigma_y) z = z_x + z_y -2*self.rho_xy*z_xy exp = torch.exp(-z/(2*(1-self.rho_xy**2))) norm = 2*np.pi*self.sigma_x*self.sigma_y*torch.sqrt(1-self.rho_xy**2) return exp/norm def univariate_normal_r_pdf(self, r): exp = torch.exp(-(r-self.mu_r)**2/(2*self.sigma_r**2))/(torch.sqrt( torch.tensor(2*np.pi))*self.sigma_r) return exp def univariate_normal_phi_pdf(self, phi): exp = torch.exp(-(phi-self.mu_phi)**2/(2*self.sigma_phi**2))/( 
torch.sqrt(torch.tensor(2*np.pi))*self.sigma_phi) return exp def save(self, epoch): #if args.train_data[:4]
['PillarsofGoldLE', '필러스오브골드-래더', 'GoldeneSäulenLE', '黄金之柱-天梯版', 'ЗолотыестолпыРВ', 'PiliersdorEC', 'PilastridoratiLE', 'PilaresdeoroEE', 'ZłoteFilaryER', 'PilaresdeoroEJ', 'PilaresdeOuroLE'], '9230f3d80948da581ab945bc67a23d262d8e13e0fa11053db29d7ccd34c07c9c': ['Jagannatha'], '32469c955c8892b1a9f44c4bd2557d46866a87d624920ec64c95df5bd4b3f114': ['IceandChromeLE', 'ЛедихромРВ', 'GlaceetchromeEC', 'LódiChromER', 'GeloeCromoLE'], 'f14e37e702546cb3739a1502e84b281259d387ff6b49a1372f595e0d42beff1e': ['DeathauraLE', 'АурасмертиРВ', 'AurademortEC', 'AuraŚmierciER', 'AuradaMorteLE'], '53d02d758ba023e0d145e5e6c20a3013aaf70b41aa80a986cac905f9eb299804': ['LightshadeLE', '光影交错-天梯版', '光與影-天梯版', 'LueurnocturneEC', 'СветотеньРВ', '라이트쉐이드-래더', 'JasnyCieńER', 'LucesysombrasEE', 'SchattenlichtLE', 'LucieombreLE', 'NuanceLE'], 'dc7ba1ae7bbe21dc0ee47b43b978fe5f55dd9458db3ba53c46f066501e8d3e7b': ['JungleDepthsLE', 'ГустыеджунглиРВ'], '539c985ee93ce34c0455f004aa323a569a154cead011d3e4e5212ff7b1723865': ['ESLMetalopolis'], '0b4bc506780c04c999d5bd5f1ec129bcf1c8bde2781edaee626ff79a701437ba': ['EternalEmpireLE', '永恒帝国-天梯版', '이터널엠파이어-래더', 'ВечнаяимперияРВ', 'EmpireéternelEC', 'WieczneImperiumER', 'EwigesImperiumLE', 'ImpérioEternoLE'], '52a562c07eb3a1887b2accca881e6a9ff1065f6131741c12ed4265a08bb62950': ['PillarsofGoldLE', '黄金之柱-天梯版', 'PilaresdeOuroLE', '黃金之柱-天梯版', 'ZłoteFilaryER', 'PiliersdorEC', 'ЗолотыестолпыРВ', 'GoldeneSäulenLE'], '81a673febf5dab0be0b70be74838ad6dacbf1b3ac338cc3be5b2a46556fa1783': ['SubmarineLE', '潜水艇-天梯版', 'SubmarinoLE', '潛水艇-天梯版', 'PodwodnyŚwiatER', 'Sous-marinEC', 'ПодводныймирРВ', 'U-BootLE'], '817a7d97e089642e25f9ac718158074d40e1d1833ba1ffc6e225213f353cb66f': ['RomanticideLE', 'RomanticideEC', '紫晶浪漫-天梯版', 'RomanticídioLE', '羅曼死-天梯版', 'RomantyzmER', 'RomantizidLE', 'РомантицидРВ', '로맨티사이드-래더', 'RomanticidioEE', 'RomanticidioLE'], '30fb32bd4b28706403f57a3931d8707bf113a311df12b73fc4c6dadcd942b51c': ['DeathauraLE', 'AurademortEC', '死亡光环-天梯版', 'AuradaMorteLE', '死亡光環-天梯版', 
'AuraŚmierciER', 'TodesauraLE', 'АурасмертиРВ', 'AuramortaleLE'], '<KEY>': ['OxideLE', '锈化山巅-天梯版', '氧化物質-天梯版', 'OxidLE', 'OxydeEC', 'OksydacjaER', 'OssidoLE', 'ÓxidoLE'], '<KEY>': ['JagannathaLE', 'JagannathaEC', '世界主宰-天梯版', '札格納特-天梯版', 'JagannathaER', 'ЯганнатаРВ', 'JagannathaEE'], '8f99dfc80e4d683eded839477bb724b06327f3d0c628ff09a0774335c69b2acf': ['LostTemple'], '597db7c89816b9a3761a4a4ce21e47883620ebc88db177051189af121c294a64': ['JagannathaLE', 'ЯганнатаРВ'], '33b35437ef1fb6bf7ce00e27cbfbfbac812c09e333e5aa9ead0facfd809b2cd6': ['EverDreamLE', '永恒梦境-天梯版', 'ПомечтайРВ', 'RêveéternelEC', 'WiecznySenER', 'EwigerTraumLE', 'SueñoeternoEE', 'SonhoSempiternoLE'], 'b6d4e68a01d89755703d05c2dd93cc3644e83e99946686b8a6d1e0fdf457d766': ['DeathauraLE', '死亡光环-天梯版', '데스오라-래더', 'AurademortEC', 'АурасмертиРВ', 'AuraŚmierciER', 'TodesauraLE', 'AuramortalEE', 'AuradaMorteLE'], '929c215029a6180ea7c8dd3b8d7670b0777ee07e23baa8145681a80ad85fc07f': ['DeathauraLE', '데스오라-래더', 'AuraŚmierciER', 'TodesauraLE', 'AurademortEC', 'АурасмертиРВ', 'AuradaMorteLE'], '60302d43fbbc441241f4a8ae22e23d78f5f7c281a877d6a02564c779a5e19564': ['PillarsofGoldLE', 'GoldeneSäulenLE', 'PiliersdorEC', 'ЗолотыестолпыРВ', 'ZłoteFilaryER', 'PilaresdeOuroLE'], '7d6bd5f6ba938b6f478d374f457feb217f13ef69a1d0addfefa65807ab4cf658': ['DeathauraLE', 'AuraŚmierciER', 'TodesauraLE', 'AuradaMorteLE', 'AuramortalEE', 'АурасмертиРВ', '死亡光环-天梯版'], '991e96a7b6dbde3a23ba6bad7886a5c0ab852c09f36a9627c326c48d9975ec9c': ['PillarsofGoldLE', '필러스오브골드-래더', '黄金之柱-天梯版', 'PiliersdorEC', 'ЗолотыестолпыРВ', 'ZłoteFilaryER', 'GoldeneSäulenLE', 'PilaresdeOuroLE'], '8ed52c821dff4fb95f9c471a2fe790744d7fc680bc85e6e3487e8811c862ae27': ['아이스앤크롬-래더', 'IceandChromeLE', 'EisundChromLE', 'GlaceetchromeEC', 'LódiChromER', 'ЛедихромРВ', 'GeloeCromoLE'], '847d6431f541ec0c85e7dd39264108ebb48a21fd0356b9b76dbfb7b156396ac6': ['서브머린-래더', 'SubmarineLE', 'Sous-marinEC', 'ПодводныймирРВ', 'U-BootLE', 'PodwodnyŚwiatER', 'SubmarinoLE'], 
'84dea9d10a5623939206b2d8fa370f3a52e5f3f035fa9439f5ae64d14c306a5d': ['아이스앤크롬-래더', 'IceandChromeLE', '冰雪合金-天梯版', 'GlaceetchromeEC', 'ЛедихромРВ', 'LódiChromER', 'EisundChromLE', 'GeloeCromoLE'], 'f21164bc435bd33b90155c1d10fbf1e7437c5bcb19169cfdb25fcc0ca8a62d8d': ['RomanticideLE'], 'ec90fd4c1307eaa729c61d7e5c7e172c3caf5c6c4d875029f74ae20b21767bce': ['EverDreamLE', 'WiecznySenER', 'RêveéternelEC', 'EwigerTraumLE', 'ПомечтайРВ', 'SonhoSempiternoLE'], '6519d1d9cfc97de50486c8554fb3458d68cf3b464bcdb29871c01b02404ef77c': ['SubmarineLE', 'Sous-marinEC', 'PodwodnyŚwiatER', 'U-BootLE', 'ПодводныймирРВ', 'SubmarinoLE'], '22b1e7514c189723f4ff4f2d74abe1f4edbfdc0d2683e79fdde0649485bf1de2': ['GoldenWallLE', '黄金墙-天梯版', 'MurdoréEC', 'ЗолотаястенаРВ', 'ZłotyMurER', 'GoldenerWallLE'], '46912c153af6ffc981e4e010c3224cf3b1e93ed1d9673bef64a759366be8d834': ['EternalEmpireLE', 'EwigesImperiumLE', 'EmpireéternelEC', 'ВечнаяимперияРВ', 'WieczneImperiumER', 'ImpérioEternoLE'], 'e698c4ab9ed516f15a99f561e7d872040f01f7cfd7c963995e9176bfbab973bc': ['GoldenWallLE', 'GoldenerWallLE', 'ЗолотаястенаРВ', '골든월-래더', 'MurdoréEC', 'ZłotyMurER'], '687cbda329cc700888fe80463b94ac9bd2d40a70a41534398f03f2a8c8a9a59e': ['AugustineFallLE'], '50466aa59dd9bde7f0a3c80c8a36f3db19131ffaa8b5121e21a5652bf7f99f32': ['MultiprocessorLE'], 'f44f2d90eccf52d95c50efa072fe3d706ba990f02c908ac322120cf9ce83076c': ['OldEstate'], 'a4a9839abcc342a2f27f09a602703b5dc8aa2ceec177739bd8f2c9944a232e87': ['KairosJunctionLE'], '825e81c1bdcf9b499a9de8602a878300bef6991cb941f2e624ccfd4dd06af377': ['AutomatonLE'], '2413459abca589f4465285e847279d2a9ae9de50a6205a495ed19d210f0dcaf5': ['PortAleksanderLE'], '027f64594ccc0530ce4b217b8f2ee06d9352c566b2e1e06b1825316d90fd3652': ['StasisLE'], '2a007cf32ae0ae370384640f3ece5474f1539fd0ad99438e16d04dd14f9293d2': ['ShrinesofLizul'], 'b43155849e7576a91640f11b1cbd5bf59a06502ef603c405985366df34ded25e': ['FloodedCity'], 'fe7d8684ba62f89ce12041bdca601a855d196459bef9c7bf4bd239ab08dd6afa': ['DustyGorge'], 
'10d370d66a6be404f09cea8eca8878251552372f3a951ccda366fa6569008b1e': ['Ujari'], '79820098d64360659a6d089fe79c3273df2949e966780c3bc5f4f7eab6e3caa9': ['BastionoftheConclave'], 'a31acd143bd0659c153c32856835208053fd69122acfefc7c55b6f639b3078ca': ['SnowboundColony'], 'd317b24d263bc4c6d574bbdbe3212e9f1209d9c11b06bcd51bfa39acf54a1704': ['DreamcatcherLE'], '0d7d6527ed73604745ec2a029216f734acab980c2f5db744e4f0079d68ca7fb0': ['LostandFoundLE'], '<KEY>': ['16-BitLE'], '7a47dfd6997c8ea735450810ca546382d8f65649300a513070e13b5e25fdf074': ['AcidPlantLE'], 'ee56fcd29828845f2ba08ff9a1570a301cb4c04463b75443927f4fec247a1374': ['LostandFoundLE'], '68c3c921acffa55e7aef3bd7e27e13952b50a0b5e047ec49f24779bb5704b612': ['AcidPlantLE'], 'a09c8b2f845dcf52a38362c94f2637fd0567ed3d0841267b2c1d6caf237f90a4': ['RedshiftLE'], 'a2b4e9092af4caf6239f38866521a2508be9f6bab89d03e5e179b879234d2267': ['DustyGorge'], 'd8ba8c8c5b5020233278abacc6d403ed4d237735a64189d30eeec1a1291196cc': ['RosebudLE'], '<KEY>': ['OblivionExpress'], '9e59f198aa034d5b59b8ae94b19a63b098946eaf35c146b4da22af61b50371eb': ['Lock&Load'], '<KEY>': ['MistOpportunities'], '<KEY>': ['BuriedCaverns'], 'b519addc56760a6c62763edbf83b40ab6861c3bf6f45b4de235204db8f74fe96': ['Lock&Load'], 'de1dca5d26f728340dd3a4ae71d4837572d8edb26ab698e13b9c7133027bac70': ['AridPlateau'], 'b5b3e8af53340bab2b05d39e870c1fe2e7cef8e567c93095474363514158e101': ['TroiziniaLE'], '788305b7e13085ba392780a926a0020cc1842c40614c3d7dd3dcc132810cee8e': ['SnowboundColony'], 'ce47606d49958893afff1fa1a06ce200aaa2478436643dbe1eb9ca6b52546e8b': ['SnowboundColony'], '173e6e07345f9419d3c354ff3ca31fa792e1217d1bc826bd5e68119d2256679d': ['EternalEmpireLE', '이터널엠파이어-래더', 'EwigesImperiumLE', 'WieczneImperiumER', '永恒帝国-天梯版', 'ВечнаяимперияРВ', 'ImperioeternoEE', 'ImperioeternoEJ', 'ImpérioEternoLE'], '7d3dc685326cb28ecdcc79a23e5d0380504859a80f2eade3f226f2bad7cf0663': ['GoldenWallLE', 'MurdoréEC', 'ЗолотаястенаРВ'], '7e631b8446e87c913fbd37d8ea1692f4b8a97e990b8acbdc649d916a00199a17': 
['DeathauraLE', '데스오라-래더', 'AuramortalEE', 'AuraŚmierciER', 'TodesauraLE', 'AuraletalEJ', 'AurademortEC', 'AuradaMorteLE'], '333a72d095a9dc010935f1c9933620dd83f0f38ed13999fc27fd74920249442d': ['GoldenWallLE', '골든월-래더', 'ZłotyMurER', 'GoldenerWallLE', 'MuralhaDouradaLE', '黄金墙-天梯版', 'MurdoréEC', 'MurodoradoEE'], '868ad12fd69071a85001065e829ad93a05184e49ab737d86768d50001f5c3238': ['PillarsofGoldLE', '필러스오브골드-래더', 'ZłoteFilaryER', 'ЗолотыестолпыРВ', 'GoldeneSäulenLE', 'PilaresdeoroEJ', 'PiliersdorEC', 'PilaresdeOuroLE'], 'e7ca9b56df33202f7b080bc98ae013e5daf9c6029a0392895a37851b4114a870': ['SubmarineLE', '서브머린-래더', 'PodwodnyŚwiatER', 'ПодводныймирРВ', 'U-BootLE', 'SubmarinoLE', 'SubmarinoEE', '潜水艇-天梯版'], '9ba880e345235d10845147e6b5a7e4b1e6229a46ef0602ce4c690e39ca60a16c': ['EverDreamLE', '에버드림-래더', 'RêveéternelEC', 'ПомечтайРВ'], '849b250914ca78c95e145fd4455342a9599a297ebb3a96c90e7196ed1d8e3ceb': ['EternalEmpireLE', '이터널엠파이어-래더', 'ImperioeternoEE', 'EmpireéternelEC', 'WieczneImperiumER', 'EwigesImperiumLE', 'ImpérioEternoLE'], '7dd7ff737d93e63ea3086ffee36b64ebf2692160d00823c9da85543f86e22bd6': ['SubmarineLE', '서브머린-래더', 'Sous-marinEC', 'ПодводныймирРВ'], 'ffcd9cfa7b055c6df1315dc82ba83acf070535553038a2f57ebd39b238637420': ['서브머린-래더', 'SubmarineLE', 'SubmarinoEE', 'PodwodnyŚwiatER', 'Sous-marinEC', 'SubmarinoLE'], '54a1058a21374fff3888572b66665a570625daeeb9e9a30b5c30563e4f44b6c5': ['IceandChromeLE', 'EisundChromLE', 'LódiChromER', 'HieloycromoEE', 'ЛедихромРВ', '冰雪合金-天梯版', 'GeloeCromoLE'], '4bd24c342ad5cec844fb926984dda6e66f5d68e9f6ddb01c4a9b28cbe9400186': ['IceandChromeLE', 'GlaceetchromeEC', 'ЛедихромРВ'], 'f927eecadf18585a92c21791f578cd89c53086ce87d6f325adf1bee26e68c963': ['PillarsofGoldLE', 'GoldeneSäulenLE', 'ZłoteFilaryER', 'ЗолотыестолпыРВ', '黄金之柱-天梯版', 'PilaresdeoroEE', 'PilaresdeOuroLE'], 'e308bf70dd82f02e6f34eb661134c70fbeaa3712357c2cf454cef4c74edcaf23': ['IceandChromeLE', '아이스앤크롬-래더', 'HieloycromoEE', 'LódiChromER', 'EisundChromLE', 'HieloycromoEJ', 
'GlaceetchromeEC', 'GeloeCromoLE'], '6bbaa5f29a9eba0669c3a60ade6de18563088c679a8c7cecf9a6f6ab23d163c1': ['EverDreamLE', 'EwigerTraumLE', 'SonhoSempiternoLE', 'WiecznySenER', 'SueñoeternoEE', '永恒梦境-天梯版'], '6e933e7b9b16bede68d2cfa66849fc47ad942a2ac5823e852ce46e2bc4b37d8f': ['GoldenWallLE', 'MurodoradoEE', 'ZłotyMurER', 'GoldenerWallLE', 'MurdoréEC'], '0653f23b7745a053d6adf62f7e98cd8f2e6833f712283ffe2e55b9fe422e72ae': ['EverDreamLE', 'WiecznySenER', 'EwigerTraumLE', 'SueñoeternoEE', 'RêveéternelEC', 'SonhoSempiternoLE'], '7ef10de788e3159202f2850263429f7c573b32f357502617919f35aa46aea659': ['PillarsofGoldLE', 'PiliersdorEC', 'ЗолотыестолпыРВ'], '36fa737efd00e2488f702d79f729a39e12bbfcd71a512dac6265c2f5a84f7d6b': ['DeathauraLE', 'AurademortEC', 'TodesauraLE', 'АурасмертиРВ'], 'd2133278858682cb7d0e940f213a631c6f6af56d7d4912a746e7a231e7610b20': ['EternalEmpireLE', '이터널엠파이어-래더', 'EwigesImperiumLE', 'EmpireéternelEC', 'ВечнаяимперияРВ'], '236d0676f3578a3cc52d589c9b7d75663c6200106c161327e00b1cd5de040a8c': ['MLGTestbug'], 'bb12240d1748afb6b728a7c6ebd716f4ce5bf3cb49bbb8062c33e3b4938c617d': ['FrostLE'], '08a7d2815502e66c8db63360b1fffc8e513cee857e9802921bcf27fd472b8c7e': ['Metalopolis'], 'cfded85f4873c93793d045708458bfa3cd6491513db5ff3128a74d9c9d9a0a96': ['EfflorescenceLE'], '<KEY>': ['EmeraldCityLE', 'CiudadEsmeraldaEE'], '<KEY>': ['ArcticDreamLE'], '<KEY>': ['NightscapeLE'], '1a4ef4133661416476c3f77cc55e63e952803eb69cdfc7defe0b635a2ca1174b': ['SentinelLE'], '2a79972c0b15792a056f1a8406f5a08a38b4900a0850a4b521dfebe5159483c6': ['AugustineFallLE'], '6cfeb912a2321c0982847c3898c5f992f95435a100cf01c7849ca4e517183a0c': ['FlashbackLE'], 'd9550c461a3eee30ff12fd5f75929c7b8a0db59f6647f13ca255a79c9e6372a0': ['BoneTempleLE'], 'c90f3d56ee666a89316bf0caa03b07acf8b0f7248650324efe03da5e3b7566a0': ['DivergenceLE'], 'd7a5e9cd9f7bbda61a1e2e52d2d1df7e2af665a6921f93d801e5682138b03b24': ['ShipwreckedLE'], '5a03d18c06a2185f6c16bde8bad60dc66140bd602581074ec5a5d032d5309c74': ['OldEstate'], 
'4406cde6a7d04d29855591fd09c369457135f96e3fcde87df0228cad9415c3c6': ['NekodrecLE'], 'a51c5d05796795fb502e38afc4e4412f1bb0602a02052ab28b92abd630cdba4a': ['RhoskallianLE'], 'cb7c50bb6c03640f92899da37656ae45d8478a671a9b485e641281f7d1641b99': ['ConcordLE'], 'cd8229184d72cba2246b20eb850b0d2be9aa17154d8e77b730788de750d8fc08': ['WhitewaterLineLE'], 'aec77e9680c95245a685c6c54407356ac970929c777ce90abe7e5cccbe6e79d6': ['HeavyArtilleryLE'], '81ab918bfeaf3e7ba35e0ef722fc0f36abe59a93a87ce4723bb7986897d07c2c': ['MultiprocessorLE'], '89fa02a1476a52d28ceae84c8908200901d75ae515ab8fb9ccc6c5ce45255158': ['十六宫格-天梯版'], 'ec9d60ddb5bc90c8cf4c2a45719835860d6cb6ce0e089c7a778d41fe52377b0e': ['血沸-天梯版'], 'f535b4e4dfb0d6194a24d54272fbc6017add46c761d835cce5de5c9b36ea760d': ['血沸-天梯版'], '331e0dd6e7437b781f6c5e9eaa8f86f8746ef7faecc02ff098c3065327dd557d': ['十六宫格-天梯版'], '92c8c9950d2b90e4a4586682b9a2dd30f4396a54263a7e980410af6f7d518e77': ['飞升艾尔-天梯版'], '59f561e4f08ddffd2f94e5ac34b41dceca654d09e1ce727ce035866c97104331': ['保卫者登陆场-天梯版'], '45229064ee6fee587e1b3f64d04b8997b800ffc8f8289cfb2e09de8051d81512': ['血沸-天梯版'], '1740f2a4586b4d96d247d2e6b8f63d7db040a4fd2eb917c0e70f35da6dfda404': ['飞升艾尔-天梯版', 'AscensionversAïurEC'], '9f8337919f411ea70d8f96531563e4caf777e99d0aebe6819a6419da615bc809': ['保卫者登陆场-天梯版'], '8f08d7dae9455d72e4584d3a1b528cb787f20ca3dba1150a08f8893fc812b9b6': ['十六宫格-天梯版'], 'c46640f678047c3359cfd049a81c591f32d0d4e4f96ec736dc08fb9b30941a5b': ['奥德赛-天梯版'], '92c664d4605e80d2258132b3191df96169b4d330c5ad374aa778e0b43049d2cd': ['帕拉蒂诺中转站-天梯版'], '7fb3dbc2fbc4d15f5ba936b4cf0b2875b2aa4fc4415ff57f88c1242c844e9ca2': ['荆刺山谷-天梯版(虚空)'], 'd0f88c36ea5f9411e3c3475c58a4b7e48962d1b7be6d590f358137baa2a82ba5': ['荣耀战场-天梯版'], 'd1e77470edfd9311cccccca23c969d393ead5bc31804e9e6544203d5a670ee21': ['霜寒之地-天梯版', 'GivreEC', 'FrostLE'], 'fde86a03ddd554be66d5229040b5c9e0700f75870257a4ca4edee7b28c3f3106': ['封神之地-天梯版'], 'ff0296cbfb182ec2103e0af820677fa1830be8262e73cc89ba56614782e95bfc': ['CanyonofTribulation'], 
'b9ab774f34720056cfda53b36b56a0d19329ab345981225f712114a7820c66d0': ['BelShirVestigeLE'], 'a7507d13c1fb0bd8e0beee1f6df644d445b31e3c641688fe3f6b466bbd064a08': ['BlackSite2E'], 'b7b01d39457d58d3f1d03d79d0fb068e990d837471fa58bd89c9d2076921bf3d': ['BlackSite2E'], 'a8581298cc376ac8ba36752e48f886e72ee5c02cc9dea4ef74d3f584735949d5': ['BlackSite2E'], 'ccf88cb359b0aa2e69263fc74157264fc6a65924b926a537792c6e65366e54f9': ['BlackSite2E'], 'a26115b39698ed4c3e5ae17f4149d74e39ef6abc54f3192e908bf92a5e7ca919': ['BoneTempleLE'], '6e71f665518422ea71804b981259459a3982f5cded38a3a033be07f1bed3e838': ['BuriedCaverns'], 'b569b91482d8bbbbcb28c472426f0d8d06b14f0af24ece8ecf87357d632c69b0': ['BuriedCaverns'], '<KEY>': ['BuriedCaverns'], 'ef4de33963ceb86b739ac981549bda0993f580af6091c9e28e036e4a9704b5aa': ['BuriedCaverns'], '8d18334add87c65eac32b8dacb351d0e93c2c9e721d4ff5ec8290e975d10faaa': ['CanyonofTribulation'], 'ace95a237b021b784f4a25a4b835597b41e4d11dd0771ca29f46789f140b06f1': ['CanyonofTribulation'], 'a5384c70b5d6a6a8d26eceeac8b3e807d4f0d5c808f3634b46314be06951b260': ['CanyonofTribulation'], 'dd81c2083e1f7cd58cca8b592b125750256fb898f074b94faad767a07b0184ac': ['CanyonofTribulation'], '03e8000e81b954057f7e51a4c9d1b0f0aa1da7f9756d5fb514f4b8aa5a5526a4': ['CanyonofTribulation'], 'aed9fa11707f20035fb4ec0553d5ab723d0501d71c865a7994e965350651c4d0': ['CanyonofTribulation'], '958da161604764f7b1cd57110a40545f410d663d7b8a57671b9894dd132724f1': ['ConcordLE'], '44cbeb783fc1a7f37563dc8b881726d9be37929eed6351d67a7d3ae6a3158231': ['DiscoBloodbathLE'], 'ea96b4f748d13648551075e02d863c8e16be8327332c5dbacb13c16946bc47d1': ['LastImpact'], '6f948119e08088f9fe4e6c02f048f910bc661ae112ec95b1584fce81abc2af11': ['LastImpact'], 'b7b989963c5f44c1dcc67b7907faa8a176dda201319fa24366cf362e5bd97999': ['LastImpact'], '07e1a1d7786faaa2687e9d89abb4e8f26312a5197c4e93e4c429922faf1d2b7a': ['LastImpact'], 'af766a90b412fa09a05ff073ea16540471cb60fb543a7b0841f7c417e8099e86': ['LastImpact'], 
'464fc73846b2457515a4caaa422451d99872ff31b17a050a2244feda9862db17': ['LastImpact'], '3c71f1b82d4ea21322fb0cbd244cece4fc2a91f8a83091c6b1d82e6e8a911e5d': ['LastImpact'], 'e62bdb112495892ea302ca9ac5454aeaeadd23d63fc27f7b0af9b50cc9fea331': ['LastRemnant'], 'a1d58cb3b052a7e326215eb2ecdfffe0a3f9a6477761dcf9a4318a3c8c5ce178': ['LastRemnant'], '11fd05f0632f0644c097b58dff04d1540f4a4f910be425497266fc3b5de4840d': ['LastRemnant'], '7b870fe27fa8e2edadb9592533565d517d7614c0482f8e844c0f2b279512c774': ['LastRemnant'], 'c9f65e0404e513926818a15eaae7d9b26319ce1e4b5daa5ebb64a11c76df81dc': ['LastRemnant'], 'cc7b8a68b57ba4c7471dd6fb76f426fdd4b1b7f8938440a732471d1b1585c484': ['LastRemnant'], 'e7092ea3fc23c8e2192a7381807a518030de5cad14aee0abd5be9013dda6ca61': ['LastRemnant'], 'd00f4bef22ff2277cf8be455134a6a67dc6788a9569ff1a189a4e48f8d4ab708': ['LastRemnant'], '3ea997abd97513c98364e08fbbe715e3a64251ee46150fe7821df9de160b7c92': ['MonobattleLotV-MapRotation'], 'd14e0722ad99219a3aa4112eb9221b2b32778109e401e74a52a46805f40017df': ['MonobattleLotV-MapRotation'], '<KEY>': ['MultiprocessorLE'], 'de73d9fe57867d2ea656e566fc8bfce970a02772e26f106d06c9a592cb620a0d': ['MultiprocessorLE'], '698495fd7708dbaf88c9bf0ee313ecb20ba396d4a786571a90da5cfe226a09b2': ['MultiprocessorLE'], 'f90a30341994b2deac785ed0ca7f831061915eee56b6d8818dd9d61c6708a5e7': ['MultiprocessorLE'], 'f43f3511a02763d02ba89cf861a93b31bf508bd013480e5a83477ebcf772d724': ['MultiprocessorLE'], 'c8dae8b81beabb46f8f310c3f51b29910262d6dce3107d00d60835d0adb4eba2': ['MultiprocessorLE'], '746a2a2892df4bf6ecaa5934c85f3a5ed01a3119aa6aa392a99d595775a30327': ['OldEstate'], '01bd9d31d4da6033cb19c602804e484d91af135a3751e934b02cfbafdb2f7680': ['OldEstate'], '9b709e892b048a198175e2a33aaf06310f272ac8d18ea1e30e10a937c18ec319': ['OldEstate'], '19f192d9ba93d2ab3d584997644c2272aaabc95e7b641e9a2574b818b5422f81': ['OldEstate'], '6151fbf19948196b15fdb26d58251a64cc2f183fa1b3ffbd47d9415c71cae16f': ['OldEstate'], 
'ca378f9b7d2dbf66b474b58e7cbdd9598e9d9677b3d9f12b8dc6f2ed983b6e27': ['OldEstate'], 'bb8aa912cacd216f41a9249281fd47bd0382c3e2fca1c2695fe88195e7df66ca': ['OldEstate'], '6766211bfce07607345e8446966000ab501608ef6ecf1488810024b94c375ca2': ['PhantomMode'], '1a5bcc5ba95c2f67b231eaadd8aa6ea8cc5533a6ba581d763a32e5b54d398efd': ['RooftopTerrace'], '<KEY>': ['RooftopTerrace'], '<KEY>': ['RosebudLE'], '47db46301ce4ce3e084f3d956b2ba0ecff45c5b5d76c697df6cdb262f74c75fc': ['RosebudLE'], '47ba7281df3b8a2fcaebbefe36f63c055f91d2b28b30b28d771b86425a69b779': ['SacredGrounds'], '39646f358c4440d751f414333cd99b1a8d99b2cb86b288082492956be8233a41': ['SacredGrounds'], 'ac6f6aba46b2529cadb4be2e4e3ef18fa9504a62084a709bfab65bc68e53796d': ['SacredGrounds'], '6f018105b3b14c6fb7853835499bfb28027b5ef590a35f5ec275913f6f3f5a8b': ['SacredGrounds'], 'e4127ffb4501d662d2331b46a8ff8fce7b3ab2e833be04404ddafe72a9e15348': ['SacredGrounds'], '505f4f655d82651a06941b4d353c28ea95d3b8971be5f964853eade4a4c6a320': ['SacredGrounds'], 'c7cea6c9364c6c989e6ef99676dd614c1b40e9841711e3c9a289c9616d78d098': ['SacredGrounds'], '4031ec7a5641f6ad78e08c89c767cf92018488a0ee30f707905739cd2bdda0cf': ['SacredGrounds'], '92c8ce167da081d687d4b3c7449336a4ec9544202f8d3c036e72da4a6f1b405e': ['ShipwreckedLE'], 'd9e2e6af6131ae82182cfeac7c09a42994fcd2f71961a72cb10f31852d6b1509': ['ShipwreckedLE'], 'd8a75644aa881b1582463f0e9eb33130b3b2a943fc1e457f2c851b696fc1a51f': ['ShipwreckedLE'], 'ec7bfb48e197b61b71ce74e38116d0a13db0e7e44ff2d67deaf11127a06ac1ab': ['ShipwreckedLE'], '0cd8ce5f10dfa28fedc367b4c046463f0415397851d2a1c58b32f4bed0636463': ['ShipwreckedLE'], 'b5b0999f593fe2abd437e3ab8ee1324e13c9e8262f265045c1986f6346255543': ['SnowboundColony'], '7ca8a448d38081c852027c1390ea2b24c9ed4e4e7974187f14bb32eafcd65166': ['SnowboundColony'], 'e96e5926401de8c2b05016af92d07276939342c531ea19b91d1523885d345028': ['SnowboundColony'], '2af8f151dcf79d4fedb1697f0c902ce4d74f853b46f900922aecb5a37ec192f4': ['StrandedIsles'], '<KEY>': ['StrandedIsles'], 
'3df2a55e52828ad202c8fce79a540153bad6e086c901f2a8073b204367776869': ['StrandedIsles'], '604e177c3e10dad8c97f0ff2475dfacb902d1beb6a09278d75089f9f8a887921': ['SunsetValleyLE'], '<KEY>': ['TroiziniaLE'], '22a387e73e08ff3a2ae80041bd3ee6bf5fa43745b48e96e5e6c7ef7ab34ecf00': ['TroiziniaLE'], 'cf903f14beed452628f7fcb44824931813b0a0e9508bc15eecbfa4175bbb493c': ['TroiziniaLE'], '91139917dc85554cf77720b8cce8c65f2a7766bc9b6c36311e4bbbaf630e27e5': ['TroiziniaLE'], 'c31de3e9583f9dd637720ca146af814ca975930a61c6bdcdefa419c6f30757a2': ['TroiziniaLE'], 'ef62f1ea6a8ae60b493faffbdf49c5ce01873abe04caa61e28a18df94d27f4fb': ['TroiziniaLE'], '9dd8ed14cf6110df8a2f8b0849441a234c6e2ebaed6475b19eff5971bad56875': ['TroiziniaLE'], '4ac94ba85b421e7e5a92ce8fa6a4cc4dda1bf029cf08b92a20d6665a50a38d4c': ['TroiziniaLE'], '80d8886e348a5a546e5b37ba619578e3e55150e9284a7f9a1f7abb86b0828091': ['WhitewaterLineLE'], 'd480160c89eede0a72e885215e34aeef16b90545d5fe8c036bd7637afa648716': ['WhitewaterLineLE'], '64ac6e345c1424de97310e9c8dcada62e6246377a29dadfc8b93921b68945e50': ['WhitewaterLineLE'], 'f5ac3545e46af6a44f0f64db6784e2a55935b1f6fc82e4c4d774b408df0b7931': ['WingsofLibertyCoop5.TheEvacuation'], '238834dc85134af6353b5b3afbb9e7f338538375890a08b816edd69e2599bc5b': ['CatalystLE'], '6a0fe00974a3c0bcfed36d35036aff9d1738e5e08e743af57fdefbef4a9e87fb': ['ShipwreckedLE'], 'f3c50486c2c5f3a7a7c634bfaa9c3da60d5e5e200b904fdf3c81a17f833f90fb': ['AugustineFallLE'], 'cd888a2d7746dc0054d47dfc17c1752c31029182583ec9ef4fb904d3df2f047e': ['HeavyArtilleryLE'], '4755d574fb568bfc69da4eebd62de7ff518bf4a95580ecc9446e5eb888e1ced1': ['EmeraldCityLE'], '6c9240c3c5d735cf0efdb980cb9b07c23a5ae2e2154854dcdc3aad0fa9e91993': ['NightscapeLE'], '9dbebe40f345b2b0285276773ae717a646b3108a77cd1aa1bc7e11e63cbf67f4': ['JungleDepthsLE'], '53d5a1175224975ccbb9d3831f93a3603ee41bd4e399ef3339b2c658b88f45bf': ['SentinelLE'], 'ed33ad6cd1a4a5298668b2716999dc965cd159262a0c6e2a5d999420b2c4a28a': ['MementosLE'], 
'601da7c727dcd78e9c67f95d9d1f52ff3dfcc6b957b215c23b306ce8c0554cd4': ['MultiprocessorLE'], '7b8ce0fef7717c8d1bf52357fc19f6035bae5d3850c6479e9461b792547c0af5': ['ConcordLE'], 'b806b07283d3cbd3707ef192f9eaefe2ba262110a24739f7c1af87f89670324b': ['FlashbackLE'], '6a39ca4c99e2df54ae6bab3b7d2322664603c02a5b86920189227dbd4fdd8443': ['OldEstate'], '1eacf206eafa467fe7fb223848b304e2b8f61d16737a057c99eaaa5cba6084f9': ['FortitudeLE'], '85611c4be8b86b8f3ba47a2f529db05e8510baff103d0dca63358d0081695e02': ['BoneTempleLE'], 'e1e9caf0aa10e101d5bd64d50ef4369353b2880f28b2b0fbf8ef894a331b1456': ['DivergenceLE'], 'df6b29adf1c159f6bdd58bfa34e2d8b21392f70dfbf97bcae973ff438c3e316d': ['EfflorescenceLE'], 'a0c429a48a8463f7e049952045c3f1adcb9069ae0bcf3a77dbbd924dc76e7d2d': ['RhoskallianLE'], 'a4b6fbfcc30fa652920919d577c950c42e878ee38061d212411e9c08d8d82668': ['ArcticDreamLE'], '0549f2e8c31d56f12fe6dba2198cccec16e36df25b60bf0d9e65f2c953f96422': ['FortitudeLE'], '830b950c300e996226882f4e34f2aebc8b8d06a429d3f3738e3bc20b401053b6': ['MementosLE'], 'a449ed9b55bd48696e38bfc7463ed177fda01c830880aadbd8f6438088d2a8fc': ['OldEstate'], '7ca739f2d7118bbed18318de805cd5933d9ce5b16328ec7cbd65ea02f0f0d2a6': ['NekodrecLE'], '95b7130c89d989819b1ac265951d8c627da9426f1cb236d8edc5d0b9cc37e310': ['DivergenceLE'], 'ca0ea07ed8f807b2763d7fc70b0e65da28a0164076f1de545acd16f7d60c8b8c': ['NightscapeLE'], '8984bfc09e7f703f1ce197583b7bd8ef6cb694ae1e695deb540d164dbfc0ee9b': ['HeavyArtilleryLE'], 'd10f444a57276a8d7c2616cffdb0d2b27e08c14e17fa653e69da52f2425a39a8': ['EfflorescenceLE'], 'eb881298e9e98a9ce955a1e834ec52693ed0dae8e5ce75345cb896842fe3385c': ['ArcticDreamLE', 'RêvearctiqueEC'], 'b95e7159c18f38e76711c4fe0d97a0038fb6a1bc9543367738d5d4ba6832e660': ['RhoskalliaEC', 'RhoskallianLE'], '0949f1012517d19cd4ed8ad7ac83c64729e3c3b78d999d70a5e863a3570f9142': ['ShipwreckedLE'], '12bc871cbe73f529e85d52c5a22970103f7c519b112b848f05f74ca8ec1c950f': ['SentinelLE'], '62dcc6438f1575db245bbfdda0952df3b1e52967b5bb399424a90bdea996f21d': 
['WhirlwindLE'], '0772eebca8dcd8538a3c7adda5d81353ac54b1c540108f7895dce9238d057e6a': ['SubmarineLE', 'ПодводныймирРВ', 'Sous-marinEC', 'PodwodnyŚwiatER', 'SubmarinoLE'], '4723593342c8ea2ab7e4c6482b52d3129a4e01baa3d801aa4a52e4765a34e4dd': ['EternalEmpireLE', 'ВечнаяимперияРВ', 'EwigesImperiumLE', 'EmpireéternelEC', 'WieczneImperiumER', 'ImpérioEternoLE'], '77efb7483f953ad7d863794d3347edd3e7487fe7a2c8d03c7fe654b004a3e97e': ['EverDreamLE', 'EwigerTraumLE', 'ПомечтайРВ', 'WiecznySenER', 'SonhoSempiternoLE'], '2f6c6d3d9d58e0d448e8bc9bfaa01f951a6902ba26a07a92f306515da7edf633': ['PillarsofGoldLE', 'ЗолотыестолпыРВ', 'PiliersdorEC', 'ZłoteFilaryER', 'PilaresdeOuroLE'], 'eed88e65736ce0e799b25b8a2a0d39da01389f34ea1477e88ba8a3360231e27d': ['GoldenWallLE', 'ЗолотаястенаРВ', 'MurdoréEC', 'GoldenerWallLE'], '25dbf937b2f7b5a5d2530085032b574a5cbb2e5c79d9f697426c5f2e088f5319': ['EfflorescenceLE'], 'b58b7b55772a25e9b38a9b0ad72f4655ec43a0dcbe80dba04977b9bfbcd4fd76': ['DivergenceLE'], 'b47f7ad437fa7d409589fe17bf26ca364acefa37bbff16b85539953b63c827f8': ['ArcticDreamLE'], 'e0584bf2714034ab641c0e05a0fa08f1d6889aafbc4c482de881b050c9f8d866': ['HeavyArtilleryLE'], 'fd862b572ed2582a679385aa9ceb5c947082bb0909b4c84bce0bc770a6633c97': ['EmeraldCityLE'], 'c3cd965f16b4057f2180b464332e62596f59f22fb590858dc3fd2b63f24d323c': ['OldEstate'], '10d47214987f5564ee39fcae02a30523f4bbd524fcc6695476580d88a049f82e': ['NekodrecLE'], 'a61d79b32a0130f4fd524f445c4dc914ec5b9774c7d6c5677cc74fb1157a7a54': ['WhitewaterLineLE'], '2391f2a849a2eff169c61052630068bf42385e927d5a48e4a7bd9dbb4774f74f': ['ConcordLE'], 'f247001862014f6c2c473457476c7ef5b2693d715b3e0146f5c82ade9a846408': ['FlashbackLE'], '764a74ff92b7d0cc0c716e7da5b6c88e0dc4fa53b37e376b5691adc8ab224432': ['MultiprocessorLE'], '513575d01e0d759bf920547e9215bed44ee3069df909bbbd87f222957a9573fd': ['CanyonofTribulation'], '7ac6f01c3126a6ed4f026cf5fb240b827721270e8e64c8bb1db7a7e8d0c496e9': ['JungleDepthsLE'], 
'fa11a4efd5091422890aa1546b57780d4205916ec5a9a95bfae662928ecfe911': ['RhoskallianLE'], 'b0d90c0cc48a2ac9bf0a8a39b97b876d796dab12508ac197916a183e336f508a': ['NightscapeLE'], 'd7eb587df717c97aa7e05f35427309fd89b816addf173c37dc02219ac6930253': ['EmeraldCityLE'], 'ad95f1cc44305e9f9bf7faa8585b933a761f5bda9eb4f6c9af85d9a8b5c87dbf': ['PillarsofGoldLE'], '<KEY>': ['DeathauraLE', 'AuradaMorteLE'], '<KEY>': ['EverDreamLE'], 'ce8cf817f15d8d2c7643b5dda9c245db35eb1ecd51010bc690928ac3bbc6c1aa': ['IceandChromeLE'], '1c66aed3a77a7bc5c11ea53ac5b91ab2c8a7c818bb1a30f62e81ca826001ea6a': ['EternalEmpireLE', 'ImpérioEternoLE'], '9383508a4d295bd1368d6cd2d1ef46737c95f98073c4d5579476c1e255bf9853': ['SubmarineLE'], '7caa6beba271187a4f7268fa8907ffdf6a90ccd6f4581b550fd0e4c2b6072a4e': ['HeavyArtilleryLE'], 'c100441d71c1f0885bbb6ffa6d9d2af45a1593ab7d39d713ec35974aa593b7e8': ['DivergenceLE'], 'f8a4957efb903d052ffe8a1b7c8037b371e6471b090b24a539f52508eedb9eb7': ['ArcticDreamLE'], 'd085a8bc006881ee5bd66208d8201ac8f9afd9352fa7603de1d36babd176159e': ['ArcticDreamLE', 'ArktischerTraumLE', 'АрктическийсонРВ', 'ArktycznySenER', 'RêvearctiqueEC'], 'f62c091a427c065df46c28e6e40d7e002fd0b2250492b79153bf77fb4eea1f62': ['HeavyArtilleryLE', 'ТяжелаяартиллерияРВ'], '0a6abed1b79209ff9c4b4a6a27160f7b11759e1334e830f3eff175a0a3e3e8d2': ['SentinelLE'], 'b04f63511dd26c04439de9cfa2c5765011c0e64ad9e0b1c493df36f1a1dfbb49': ['DivergenceLE', 'РасхождениеРВ', 'DivergenceEC'], '2190ada728e38829845a2eb7665e5267023e59ef07cbc0d123537f34fcb21253': ['EfflorescenceLE', 'BlütenstandLE', 'ЦветениеРВ', 'EfflorescenceEC'], '82f76f29e7c59678532b3d874d294c09233bb4f529a316196885a6b0910ce3b7': ['BoneTempleLE'], '<KEY>': ['DieSmaragdgrüneStadtLE', 'EmeraldCityLE', 'CitéémeraudeEC'], 'f014c981f74a0f041132ab2767c34af1d7b9dc25463c5070ef0c422d67bcbd7d': ['RhoskallianLE', 'РоскаллианРВ'], '3e7269771c1e27a5aefcb5b011b551aed84d63c1d46256344998bf407faa3dad': ['XelNagaCaverns[MacroOrDie]'], 
'64ac03a337efbef29e1f1b6db7f3f1636ecf367d525b9d3711b1e91cd26b10db': ['MacroV1'], 'fe5ab4aa8300046de42858624b39082c846ccaf16b1b949c979cfc022b8073fc': ['NightscapeLE', 'PaysagenocturneEC'], 'b22f4038ac9ec22d00b737cd584198a005ea9b78d037ee80b8597c72f95408d6': ['SentinelLE', 'WächterLE'], 'a23c2a99d6ed91184bfc36b43bce5cb9ec6f32afa664898043e17347b2da4232': ['GoldenWallLE'], 'f1f2e04b2086290437dc261d609d77eb238c870887e0e85afa81ac80c2e0610d': ['AugustineFallLE', 'AutomneaugustinEC'], '2479954a78e153d2d54500d0f1047ddfb674d0629ff3ac299b1347e7b70cc465': ['FortitudeLE'], 'dc0e7946109d57b1524d8ad992d3c34c9b7b0e858447fe8d07a7e4ff7ba3cd64': ['MementosLE'], 'c1c723b8135cc1cb7904c625f3bcc8bb052895e059384101bc8484c42a8d441d': ['BoneTempleLE'], 'ea170e4fba0494c886983d150d4cf71ca38199ae8f17dfc566654871c08b0165': ['FlashbackLE'], '6005ea69dad6bf8d542c6c72f2bc7279b551093c9c5e2a3bfb0f109c88809eee': ['ConcordLE'], '09f324de3d0e84855c8aa084b46dcb0c0faca30fdcee75c7adb7a2ef65cd1523': ['AugustineFallLE'], 'a645913af84b974f5cf98bfee737479eb09957bb2296da42522ab98320cf928a': ['CanyonofTribulation'], 'a2f344294925d94d0c04b470c3800527eb05e05b82be48501d9011d183fd6213': ['FlashbackLE', 'Flash-backEC'], 'eadebf382255b87bfa4fa588175ba0ac18a8879aa9ca1c8f506916cb9043b0b7': ['WhitewaterLineLE'], 'ee1d5c0acc1b3a6304979fa094f1354901311b47da0f049ad8e84f1c68b40360': ['JungleDepthsLE'], '9b73cb979dd0c8387298a363a68a6d1bec9418d35c413b6f4bda180a4b9cbda9': ['CanyonofTribulation', 'CanyondelaTribulation'], '43e09c2abf13af0b74d9d6dba0f4e1ddc03324f00621258bd722ad46e39d8b7b': ['OldEstate'], '655544b446b09f913c48c0098b4db02f5aa93c782eb6a8fffb75f02d6be6ebec': ['ShipwreckedLE'], '4e7c42883be1f2ccb0fd30aeba1d1db2f5a9abe84816a00d085b0a3357efb6fc': ['Infestation', 'Contamination'], 'cf1581fed1e9cd0d0e7a41ea9969eae325177aa7ac29c555cd6f8a541791b5fc': ['DaedalusPoint'], 'ff081a1c9f04a5b9181ddccf7e1dea9784f54fc3bee5f946222dee192da8614c': ['MultiprocessorLE'], 'b78bcf699c3ad22c52009ae414a323bc643d947757a7202811f88d098a594ec5': 
['ConcordLE'], '5e346c4538481a18b882939053ade8c518fdd528738ec47b070d73536bd267fe': ['NeoSchool-Apotheosis(LOTV)'], '7b4953051859b8daf3ed6fb56f1ca7a9223994069c1eba742df5fd85e43961a8': ['NewAntioch'], '3d1373d04ca077ffd4bf54e4cea652fc9d61cd5ce3f670f8e628d448ff705557': ['CatallenaLE'], 'af58e9490a7e6f570df24faeb30799f9d5bbfc76b692bc5ff4d5ce87ae8badca': ['BrückenkopfLE'], '1d25f40eb73b9367a6e610df4ad65136d814c97985496d31b0779394ebe26dae': ['EchoLE'], '13b7fdb521a628103e4a8549cc82f99184fe3d17f9155ab37dbee2d615f7465e': ['EiserneFestungLE', 'ForteressedeferEC'], 'cb90c161d2291c0cf4d1d4f1f207edae9647605a5ee1bf7082459413bc246153': ['ForschungsstationVaani'], '7bc0462c880d084a7c34e42da4d74c024cafc97f989b9242e6df144469ce6d22': ['HeartoftheSwarm-Training:Ebene1'], '838719f4dc4fef9c400d09a849add628eebac868fc5a31cd239c2b9b3e247344': ['HeartoftheSwarm-Training:Ebene2'], '52dbec53531c8d4e9f9895a5f2e9c294bb45a79b1fb275616338870b879e2b36': ['HeartoftheSwarm-Training:Ebene3'], 'c5cb3fb8299b489b6d06fabf7f13d7daded643909144c7065aff2fea53833192': ['HeartoftheSwarm-Training:Ebene3'], '5c46a658866d18c953c3a75ecfbbdf08166241cae0a1679413898837696f40e2': ['Hotkeys-Trainingmap'], '<KEY>': ['Infernoteiche'], '<KEY>': ['MicroArenaEU'], '<KEY>': ['KaktustalLE'], '<KEY>': ['MondsüchtigLE', 'ClairdelunefanatiqueEC'], '09d82bb6496876d569ddd188190843be3250690ac444c8144f610777d045587e': ['SpacePortByWargirl#29'], 'a43e2955082327696cf4aeead0e3be99264ff01f12166bfac8c77fe26f2b2a7d': ['TerraformLE', 'TerraformationEC'], '637fee08ac26c38db39093a9640c3763c3800cc5bd066f6e11e47ef35cf1e775': ['TemplarTV'], '5a2f3561087373da49bfc801724d2b7f2baf1c741d55dc29fada3806b32efdca': ['TodestraktLE'], '8d70a29f7e0f1a3e6bfb86b4705ba31dc9e33a7424e9e018d9c595254ad7cf1b': ['TodestraktLE(2.1.8Spielbalancev1.0)'], '<KEY>': ['VerborgeneQuelle'], '100fd85153d236fbe613ad589ac13c8ab0bfb4c5858c947a1e1e9a79bc137591': ['VerscholleneExpedition'], 'b4e4d3994ee12ce0212c6703950eed3a61b6f5beb93b72f7b6a9aeeb5d7e2d0a': ['WildwuchsLE', 
'LuxurianceEC'], '5524bab2333b13cc28f37414d38808a04cc4185a30139cc66ef65a5b10eec543': ['ZwietrachtIV'], '9588d33f3078315815ed50e1b24429ed735836c3a507c3c18ad28a818dbf72aa': ['NeoMT-ProbertoSeras(Multitask)'], 'b79cf508f459a39a8408f0d5b5850367cb707050269e8e932546af539b39b6ea': ['ÎledescontrebandiersEC'], 'd80e571ba232e1de63e78cefebc1eff54e2b14be9ca8cf293da9c449fcdd65b2': ['YeonsuEC'], '0a1cf69decde4d8076682af2f39fb8e0be9eead35e48a8408b7b3475ba3f2b66': ['MementosLE'], 'c63c0ed808dd4de4c30197d1f90870e00152ec92617837973a85572f46e532ba': ['AugustineFallLE', 'AutomneaugustinEC'], '62e23dd7cd049d685a49c50e5b1a8d452353934dac1ac37769805456d3d9ba32': ['EfflorescenceEC', 'EfflorescenceLE'], 'd1c822795ad0c9448c2c9f4aaba14c77242599a8def1408bccf7361d5e72ff7e': ['CitéémeraudeEC', 'EmeraldCityLE', 'DieSmaragdgrüneStadtLE'], '57ff2334bad50913a76050e349e33f0f73025cbcc1223a92684b3de2fd8325e7': ['PaysagenocturneEC', 'NightscapeLE'], '0ae64975ae2c5352c7b7aeecad0024ba4a36bfa7b141109e356d52bf376537eb': ['RhoskalliaEC', 'RhoskallianLE'], '233c5925dbdf6b6b9c805fafba7e4f15af57aa1920c42a44b8abf3b472beb5cb': ['OxideLE'], '<KEY>': ['LightshadeLE'], '0ac9c9f4fca256471653dd1ca6820d1f72162177935fcb8effb3bb0451308151': ['HeavyArtilleryLE', 'SchwereArtillerieLE', 'ArtillerielourdeEC'], '<KEY>': ['ArcticDreamLE', 'RêvearctiqueEC', 'ArktischerTraumLE'], '13cff31baa2f274e7777575366da594490786347b8e7da4cfcd58a0cf1e99439': ['NeonVioletSquareLE'], 'f219ce528fe8fce93670eab1d4a312bfe417658df037c53f04a539cc3423fcd0': ['TerminalstellaireEC'], '966d378b6e4e08566a06b74c38f7472fe630df1495a4d36246b09a7268d4dff3': ['SentinelleEC', 'SentinelLE'], '12976a9c3e1551bd29ed458dc155fea81dce89d548775273ef9464f208081771': ['DivergenceLE', 'DivergenceEC'], '<KEY>': ['PassedesrapidesEC', 'WhitewaterLineLE'], '3a638dfe5d59d08782aed879d049cc9042326bfdf24269d35fbf4186d102bf7b': ['TempledesosEC', 'BoneTempleLE'], '3d7faec636f8ee856456dd2c6fa58f0f744779f8a1bcded215046d496c428149': ['FlashbackLE', 'Flash-backEC'], 
'b7e63e966349168dfe910ea6c621fc9bcf6c451f7f84c863e331bb1695a30993': ['MultiprocessorLE'], 'da6349d0b55c82cf31c563ae42561f99061e4a9cdaaac3b55b85e6863cd09653': ['FortitudeLE'], '50c7b91a4a59c15c07090df5a965870f618ecd31121c2d7ca3e10899001f974c': ['CanyondelaTribulation'], '01c51c758428ee338f9a627bc7353505437e883971f0c629c88b579ce2314067': ['ShipwreckedLE'], 'e1d6e0314e8eb931d83ab00331af5a497237a034739aef2d1ca62af7b8943d65': ['DaybreakLE(Void)'], '85524c3a4a8bac50350837e23d9ab669ddbab850bbaa8a75841b37cbd12283c9': ['JungleluxurianteEC', 'JungleDepthsLE'], 'd10340faee5f343d5ad1151e30c46ad3385f8940db19760df58675bb9e8169ac': ['PaysagenocturneEC', 'NachtlandschaftLE', 'NightscapeLE', 'НочнойпейзажРВ'], '5fe0ddc81aa08eb962a11d5cf9ee0046efa5a6d94b0e271e615487b1c178976f': ['CanyondelaTribulation', 'CanyonofTribulation'], '2a18ea5fdbc8aa50c84a39567864494b39c55f4f05c619a2b23940c7d9bff6dd': ['AutomneaugustinEC', 'AugustineFallLE', 'AugustinsFallLE', 'ОбрывАвгустинаРВ'], '42fae53b41a5653a8b7cc64a100603de641ecc4cfc127e0e0f6b374fec616751': ['HeavyArtilleryLE', 'ArtillerielourdeEC', 'SchwereArtillerieLE'], '559e4b41c45e30987787d54bd7ab015185a27e5045ae2a2a804a113ca35a7041': ['PolkaLE', 'PolkaEC', 'ПолькаРВ'], '4853f7846dd0c1219ada0287bacab840924b050a09e109495f56356794f0c61f': ['RhoskallianLE', 'RhoskalliaEC'], '568eb218274a1c8e0937efff8c229cc052d252172ae698c2e13ae17c4a2a69fa': ['ArcticDreamLE', 'RêvearctiqueEC', 'ArktischerTraumLE', 'АрктическийсонРВ'], 'f7186df71115f0daefbf7ab4d7628ba62aeece22e274a68ee2a5219055388ce0': ['SALTEmeraldPlaza'], 'd100d29cb499e544185eac6b4cf70ed07093bf1f78c1d041469d649e5653f537': ['TempledesosEC', 'BoneTempleLE', 'KnochentempelLE', 'ХрамкостейРВ'], '94b96dbab0a232a21c4352b99ede831d36013d264a534dfe57ca95b5cf6e9e2c': ['SentinelleEC', 'SentinelLE', 'ДозорныйРВ'], '51f2c9626802f915406e9fc932929dd2a1151a37274dedf833db55eb2e7ddf7d': ['EfflorescenceLE', 'EfflorescenceEC', 'BlütenstandLE'], 'd74453aeddf49cb0df9238f28ad7dd11eb090ed77fe79afe1abc276b4317e31a': 
['Flash-backEC', 'FlashbackLE'], '83f793d565e8c494904687da0d90542bce053834f9c2b9b6804517366cf04f2a': [ 'BigGameHunters=InfinitePopulationandExtraResources='], 'f2a4bf46bcfecc39d0d942c2e6b998998ae565343247f03788556a1de65797ff': ['SimulationderéalitésEC', 'RealitiesSimulationLE'], '8b78642604deaf0b3f05c80c4f9317ef32b596f04f59afd89dd82b78bcad48b5': ['PassedesrapidesEC', 'WhitewaterLineLE', 'ПорогиРВ'], 'fd118a29fa1c7bb19e944dfa151f91993acd14a518b508f5ce65b95e281c7e72': ['CitéémeraudeEC', 'EmeraldCityLE', 'DieSmaragdgrüneStadtLE'], '7b06cec36412c4d5d0c0329f41a5ba970574bbd63759ab9cf3e0f99a4d03c943': ['TuonelaLE', 'TuonelaEC'], 'a84e29437f40edbfa2801afdb8aec3b08f71c13ba43648a4e5b6b485cca9e41e': ['ArktischeFelder'], '22cc3f270933af5648cbb902feea47c82a8401f9830e15c6de1df1f30c6a1927': ['RecuerdosEJ', 'RéminiscencesEC'], '94ab3d3fb91757a7a1e18d4d9877c39998d3c18735e27e9d8c3f7e07056a37dd': ['SimulacrumLE'], '8ee4f925727d3c0af92e2d35339b09b90264773042215a7e1266cf332fdc6ca3': ['qxcsStutterSteppracticemap'], '8a50a8ae5a82f090f87cad49cf5afccd37514a004ad0650b8b0d2dc8f5c6905c': ['NekodrecLE'], '82936560f83641139ce32f1fdb1ae47332b5d535bfdd6a2281158743da172663': ['FortitudeLE'], '77d4dcdd43d8e2bffb12e88f3251a6cb0f4fdfb7625714da0733aa902e21da4b': ['BoneTempleLE'], '<KEY>': ['WhitewaterLineLE'], 'fe04347ec1089f2eb0d3b2c54577896254cd69c254eb0a85a88dcb05f0b7e2fb': ['JungleDepthsLE'], 'e029083b9fda0b99658cfb3ad0ecbbb0231d272ca9c14b479721881e7f89be6e': ['WhitewaterLineLE'], '2fc45225a463ddade87c2d007008589865f03170fd6ab6703cf8aa76665ed9b5': ['NekodrecLE'], '19b75e0efa19269d7a73a8fec5a9870ad6100239cfdd21bf524aa830a6a2d071': ['JungleDepthsLE'], '27d7fb269cafec43e7c9c02199e4ff0d2081d8bc29e44bd6bba3a5048aeb8c10': ['CanyonofTribulation'], '5faffbb8fecb707f93241c859f63c6c1bf8a78b5b9293941f5790331e40896c5': ['OldEstate'], 'e6d919c71dac5ae7b071eabfab4792cb39f0751386425dde5d4fe1df2fe77c24': ['ConcordLE', 'ConcordeEC'], '895601ad83ae1ce80f3aafc9ec37af7f9125023a631aca6bc6e3571166ee4170': 
['ShakurasPlateau'], '<KEY>': ['Forced’âmeEC', 'FortitudeLE'], '<KEY>': ['NekodrecEC', 'NekodrecLE'], '<KEY>': ['ShipwreckedLE', 'NaufragésEC'], '6b24b1fe51df879cf4c3baa389574b0ece78a62a4c403eafafc1b99bfca44551': ['EfflorescenceLE', 'EfflorescenceEC', 'BlütenstandLE', 'ЦветениеРВ'], 'fc926d3e0b6dce5ab84806fce6e6c3d9f4d50ff90600c6965651f429ad60a958': ['HeavyArtilleryLE', 'SchwereArtillerieLE', 'ArtillerielourdeEC', 'ТяжелаяартиллерияРВ', 'ArtilleríapesadaEJ'], '8119723d3cc8552542ee27c7292714cddafffbf03f4be0833ba4ceb105ca0436': ['RhoskallianLE', 'RhoskalliaEC', 'РоскаллианРВ'], '<KEY>': ['EmeraldCityLE', 'DieSmaragdgrüneStadtLE', 'CitéémeraudeEC', 'ИзумрудныйгородРВ', 'SzmaragdoweMiastoER'], 'f786498027e092b74d9a840cfac5f1d89bd431271bf71f469ad1c676f72beaa4': ['FortitudeLE', 'Forced’âmeEC'], '55922002cbbc833d7d65040995826f09af458966ba30d19d5dd445a6e360a08a': ['TuonelaLE', 'TuonelaEC'], '5bb01dd30ef93266a2d4c33e2ffc82b65e51746eeae5a3f4a328463d54f70e05': ['PolkaLE', 'PolkaEC', 'ПолькаРВ'], '9e30f71268a93fcbddda6057cd267e9ad0c3e2f9e1e3d3c1f05d44a1b6731da5': ['AugustinsFallLE', 'AutomneaugustinEC', 'AugustineFallLE', 'ОбрывАвгустинаРВ'], 'e26abdb71a12cf2ff064df07a3ca5fb1a7e1cd98c0c6804bc46de4ca62e46cac': ['NightscapeLE', 'PaysagenocturneEC', 'NachtlandschaftLE', 'НочнойпейзажРВ', 'PaisajenocturnoEJ'], 'ff23bce2732c79f93b4961c920b3c967eb161ea5384f77a3e8a098f7d821153e': ['ConcordLE'], '4ff71d5369e54b92a0394a5d9d1408c5d710b20147d97b4151734eeb2d84cc32': ['FlashbackLE', 'Flash-backEC', 'ВоспоминаниеРВ'], '524d7a6c42fbb9bbfe7b1e34b703cde08e236b47b571eb0a1d2e30d26b700c15': ['SentinelleEC', 'SentinelLE', 'ДозорныйРВ'], 'ecd8214a334b0096a29afb75336d5dda736667b6978a60089986472802680b8c': ['TempledesosEC', 'BoneTempleLE', 'KnochentempelLE', 'ХрамкостейРВ'], '1bb36e625a517e5ea51f859f3be8ae276c6834573806468e069032165ca59277': ['NaufragésEC', 'ShipwreckedLE'], '9d59f314729d65edb8d85b75d4b667a38af7be50da1c517d9a4a780025a9a380': ['PassedesrapidesEC', 'WhitewaterLineLE', 'ПорогиРВ'], 
'<KEY>': ['OldEstate', 'Anciendomaine'], '023d9ff39064db60314c4adbd51338e38dfc3072f4f41147d26da4a32adac9ce': ['NekodrecLE', 'NekodrecEC'], '90fa144fe6378b95f87beea412bbc567e33694b9beaaa5273ead79f3b4e7bf79': ['RealitiesSimulationLE', 'SimulationderéalitésEC', 'СимуляцияреальностиРВ'], '076bb0c20a3b0a5029d67e0414963354d3773d975c96f0bd131e498ce1c674b4': ['CanyondelaTribulation', 'CanyonofTribulation', 'SchluchtderTrübsal', 'УщельеСкорби'], 'e7feaa6a5a808d1ba8118fe7fb11bc9d0dfb95ca8926ff7fb03c969ad9cf36c1': ['MementosLE', 'RéminiscencesEC'], 'a8646c24041db8ac19792da4149fafa1e1f2c2dd4fad2c6007183dd951739f25': ['[M]Lightshade'], 'c215c810131b7b4d253d372ceff8b8e195a258b426ff2e63bca16adb046f388e': ['Exildutraître'], '688eb2321f60e739e03aeeffe4857eb0341d2695349b20b467aaa3a35abfffe6': ['MicroFx'], '5585a7ec470831f5549c3406aa693d94fb7634523289d2de01ccd771b9daa670': ['CyberForestLE'], '4ff5cba2e7780fae169275db2ab610c7b37a8cb221ab9ad2f555f9691d49fbf4': ['IceandChromeLE'], '0a61ddcdc6d9fe06a9ca4834745e4dabd420085803817eb47ba9eb41409a2d8a': ['Avalanche'], '6f0ab906f488c1a191951660a7d6888af3345b5c2fd91080e9a6d35d29027b5d': ['Champsdemort'], '9a0f7fe7280cd1b515d4327af209017ea6aec2c34d3e156746ffc8026664746a': ['Champsdemort'], '492a989761b01bbf14fce0ead96336fd0294e53c0371cb6083397cdcf93c1f73': ['Champsdemort'], '503de7e7a8926e3a7ceff5b5e3907b85bd667e6cddd2d9551a05644b4c5eaa20': ['Contamination'], '8896575126a3965e72c8ef2de001354ed03b7b73d238bfc65dfc87506ec00b53': ['Contamination'], 'fad20b9652eaea821b9e867fdf7bff274fffb6590e39a8ed59b3c7f5b9890661': ['Contamination'], '<KEY>': ['CrêtedeLerilak'], '5b13647d11c3a8e4c00ca919c2910267349698487cc2702928d64539860fed8e': ['DomainedeWolfeIndustries'], 'f5c87d3539d0ac1fa661f07fea264e7e06fa9675fe248616a22f6191cde358f0': ['DomainedeWolfeIndustries'], 'c84ff67d6ce1f27008460b5f8edfe8d1de7a3582f024fe4377eb8b4478fd2d61': ['DomainedeWolfeIndustries'], '105f6da14939522e84b3379843fd1820a200a4a18776264e7ed3fdc435963c92': ['Expresspourl’Oubli'], 
'f9f5a7ae5ae2a6a87f82279dfe3e84213bea993ee5649f7b4e34b65337c61594': ['Failledentelée'], '<KEY>': ['Fauxd’Amon'], '207d1c0fec1eb2f756a2454fc633dd122ce09cf3fb3eba9f89323626eb399a95': ['Îledumassacre'], '1030b054684284b41a8cc50a2a5182bc7e3d5263efadfe2aafc557835109fcbf': ['Jungleardente'], 'b992e7804d2989153674cb585a389a05c9294ae4aef106314aaad7e18016ca54': ['Jungleardente'], '9b4a84641b9317f373f74f0931d84703ce0dda10879f84e6020075d69401cfe9': ['Jungleardente'], 'cd2a06f8ce8254b326e6f03ae1bafb09836bfc4e47ae22a606ad96feabea0c66': ['L’ossuaire'], '60f4305486d9f8079a9fb18e7a5d043c74ebd609f111b1814da352687003777c': ['LaroutedeKorhal'], '82fef0d02cd8f49accf1fb693e1ce9f93250fcaddfc332f73f0e3d6a8d8baef6': ['Néo-GettysburgEC'], '16104ed7096732b6f1106bdd18b3879738b0df0775426d9d1f4f4ac558d159ca': ['NéphorI'], 'd5e940f325aa5de903b096e1314b26f7853ad319592aea0a4c4f430efba2b505': ['Omicron'], '8e3e5b20460a22865d33ea175fe2348b07d7bed56e7ed952f7dd3c5ff0caa41a': ['Omicron'], 'ff23d57d5aceac46eaad1cf85154d854533685129f50ad2848c91ac8a08e6c1f': ['Parésàtirer'], 'caca429fd3fd665e70cb0c9110ce875c739250e0b6005d16e92ed1a4309e9528': ['PlaceKatherine'], 'f9132d255668cb2b116deaddcf765bd4ca85221712da1f80eb9a78e3439bd7fa': ['PlaceKatherine'], 'c2be3d7d1111bb73f8c987d71fd352de00c811d41819304e5c2142837ccf4c74':
# <filename>tools.py
import datetime
import functools
import io
import math
import pathlib
import pickle
import re
import uuid
import imageio
import gym
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import tensorflow_probability as tfp
from tensorflow.keras.mixed_precision import experimental as prec
from tensorflow_probability import distributions as tfd
import tfplot
import logging


class AttrDict(dict):
    """Dictionary whose keys are also reachable as attributes (d.key <-> d['key'])."""

    __setattr__ = dict.__setitem__
    __getattr__ = dict.__getitem__


class Module(tf.Module):
    """tf.Module with pickle-based checkpointing and lazy sub-module creation."""

    def save(self, filename):
        """Pickle the numpy values of all variables to `filename`."""
        arrays = tf.nest.map_structure(lambda variable: variable.numpy(), self.variables)
        with pathlib.Path(filename).open('wb') as handle:
            pickle.dump(arrays, handle)

    def load(self, filename):
        """Restore variables from a pickle produced by `save`."""
        with pathlib.Path(filename).open('rb') as handle:
            arrays = pickle.load(handle)
        tf.nest.map_structure(lambda variable, value: variable.assign(value),
                              self.variables, arrays)

    def get(self, name, actor, *args, **kwargs):
        # Create or get layer by name to avoid mentioning it in the constructor.
        if not hasattr(self, '_modules'):
            self._modules = {}
        if name not in self._modules:
            self._modules[name] = actor(*args, **kwargs)
        return self._modules[name]


def nest_summary(structure):
    """Return a nested structure of shape strings mirroring `structure`.

    Dicts and lists are recursed; anything with a `.shape` becomes a compact
    string like '32x64'; everything else becomes '?'.
    """
    if isinstance(structure, dict):
        return {key: nest_summary(value) for key, value in structure.items()}
    if isinstance(structure, list):
        return [nest_summary(item) for item in structure]
    if hasattr(structure, 'shape'):
        return str(structure.shape).replace(', ', 'x').strip('(), ')
    return '?'
def graph_summary(writer, fn, *args): step = tf.summary.experimental.get_step() def inner(*args): tf.summary.experimental.set_step(step) with writer.as_default(): fn(*args) return tf.numpy_function(inner, args, []) @tfplot.autowrap(figsize=(2, 2)) def plot_scatter(x: np.ndarray, y: np.ndarray, *, ax, min_v=-1, max_v=+1, color='red'): margin = .1 ax.scatter(x, y, s=5, c=color) ax.set_xlim(min_v - margin, max_v + margin) ax.set_ylim(min_v - margin, max_v + margin) ax.axis('off') @tfplot.autowrap(figsize=(2, 2)) def plot_step(x: np.ndarray, y: np.ndarray, *, ax, color='k', min_y=-1, max_y=1): margin = 0.1 ax.step(x, y, color=color) ax.text(x[0] + margin, min_y + margin, 'return={:.2f}'.format(np.sum(y))) ax.set_ylim(min_y - margin, max_y + margin) def lidar_to_image(scan, min_v=-1, max_v=+1, color: str = "k"): # shift pi/2 just to align for visualization angles = tf.linspace(math.pi / 2 - math.radians(270.0 / 2), math.pi / 2 + math.radians(270.0 / 2), scan.shape[-1])[::-1] batch_video = [] for b in range(scan.shape[0]): single_episode = [] for t in range(scan.shape[1]): x = scan[b, t, :] * tf.cos(angles) y = scan[b, t, :] * tf.sin(angles) data = plot_scatter(x, y, min_v=min_v, max_v=max_v, color=color)[:, :, :3] # no alpha channel single_episode.append(data) video = tf.stack(single_episode) batch_video.append(video) return tf.stack(batch_video) def reward_to_image(reward_data, min_y=-1, max_y=1): batch_video = [] for b in range(reward_data.shape[0]): r = reward_data[b, :] x = range(r.shape[0]) img = plot_step(x, r, min_y=min_y, max_y=max_y)[:, :, :3] # return RGBA image, then discard "alpha" channel batch_video.append(img) return tf.stack(batch_video) def flat_gif_summary(video, fps=10, name="lidar"): frames = [] for i in range(video.shape[0]): frames.append(video[i].numpy().astype(np.uint8)) imageio.mimsave('./{}.gif'.format(name), frames, fps=fps) def video_summary(name, video, step=None, fps=100): name = name if isinstance(name, str) else name.decode('utf-8') if 
np.issubdtype(video.dtype, np.floating): video = np.clip(255 * video, 0, 255).astype(np.uint8) B, T, H, W, C = video.shape try: frames = video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C)) summary = tf1.Summary() image = tf1.Summary.Image(height=H * 3, width=W, colorspace=C) image.encoded_image_string = encode_gif(frames, fps) summary.value.add(tag=name + '/gif', image=image) tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step) except (IOError, OSError) as e: print('GIF summaries require ffmpeg in $PATH.', e) frames = video.transpose((0, 2, 1, 3, 4)).reshape((1, B * H, T * W, C)) tf.summary.image(name + '/grid', frames, step) def encode_gif(frames, fps): from subprocess import Popen, PIPE h, w, c = frames[0].shape pxfmt = {1: 'gray', 3: 'rgb24'}[c] cmd = ' '.join([ f'ffmpeg -y -f rawvideo -vcodec rawvideo', f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex', f'[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse', f'-r {fps:.02f} -f gif -']) proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE) for image in frames: proc.stdin.write(image.tostring()) out, err = proc.communicate() if proc.returncode: raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')])) del proc return out def simulate(agents, env, config, datadir, writer, prefix='train', steps=0, episodes=0, sim_state=None, agents_ids=None): if agents_ids is None: agents_ids = ['A'] n_agents = len(agents_ids) # these are used to collect statistic of the first agent only cum_reward = 0.0 # episode level episode_progresses = [] # episode level max_progresses = [] # collection level cum_rewards = [] # collection level main_id = agents_ids[0] # the agent w.r.t. we collect statistics # Initialize or unpack simulation state. 
if sim_state is None: step, episode = 0, 0 dones = {agent_id: True for agent_id in agents_ids} length = np.zeros(n_agents, np.int32) obs = {agent_id: None for agent_id in agents_ids} agent_states = {agent_id: None for agent_id in agents_ids} else: step, episode, dones, length, obs, agent_states = sim_state cum_reward = {id: 0.0 for id in agents_ids} while (steps and step < steps) or (episodes and episode < episodes): # Reset envs if necessary. if any(dones.values()): obs = env.reset() if len(episode_progresses) > 0: # at least 1 episode max_progresses.append(max(episode_progresses)) cum_rewards.append(cum_reward) cum_reward = 0.0 # Step agents. obs = {id: {k: np.stack([v]) for k, v in o.items()} for id, o in obs.items()} actions = dict() for i, agent_id in enumerate(agents_ids): actions[agent_id], agent_states[agent_id] = agents[i](obs[agent_id], np.stack([dones[agent_id]]), agent_states[agent_id]) actions[agent_id] = np.array(actions[agent_id][0]) assert len(actions) == len(agents_ids) # Step envs. obs, rewards, dones, infos = env.step(actions) # update episode-level information cum_reward = cum_reward + rewards[main_id] episode_progresses.append(infos[main_id]['lap'] + infos[main_id]['progress'] - 1) done = any(dones.values()) episode += int(done) length += 1 # episode length until termination step += (int(done) * length).sum() # num sim steps length *= (1 - done) # when the loop is over, write statistics for the 1st agent metrics_dict = {'progress': max_progresses, 'return': cum_rewards} summarize_collection(metrics_dict, config, datadir, writer, prefix) # Return new state to allow resuming the simulation. 
return (step - steps, episode - episodes, dones, length, obs, agent_states), np.mean(cum_rewards) def summarize_collection(metrics_dict, config, datadir, writer, prefix): for metric_name, metric_list in metrics_dict.items(): metrics = [(f'{prefix}/{metric_name}_mean', np.mean(metric_list)), (f'{prefix}/{metric_name}_std', np.std(metric_list))] step = count_episodes(datadir)[1] * config.action_repeat with writer.as_default(): # Env might run in a different thread. tf.summary.experimental.set_step(step) [tf.summary.scalar(k, v) for k, v in metrics] def count_videos(directory): filenames = directory.glob('**/*.mp4') return sum(1 for _ in filenames) def count_episodes(directory): filenames = directory.glob('*.npz') lengths = [int(n.stem.rsplit('-', 1)[-1]) - 1 for n in filenames] episodes, steps = len(lengths), sum(lengths) return episodes, steps def count_steps(datadir, config): return count_episodes(datadir)[1] * config.action_repeat def load_episodes(directory, rescan, length=None, balance=False, seed=0): directory = pathlib.Path(directory).expanduser() random = np.random.RandomState(seed) cache = {} while True: for filename in directory.glob('*.npz'): if filename not in cache: try: with filename.open('rb') as f: episode = np.load(f) episode = {k: episode[k] for k in episode.keys()} except Exception as e: print(f'Could not load episode: {e}') continue cache[filename] = episode keys = list(cache.keys()) for index in random.choice(len(keys), rescan): episode = cache[keys[index]] if length: total = len(next(iter(episode.values()))) available = total - length if available < 1: print(f'[Info] Skipped short episode of length {available}.') continue if balance: index = min(random.randint(0, total), available) else: index = int(random.randint(0, available + 1)) # +1 for include the last step in the sampled episode episode = {k: v[index: index + length] for k, v in episode.items()} yield episode def preprocess(obs, config): dtype = prec.global_policy().compute_dtype obs = 
obs.copy() with tf.device('cpu:0'): if 'image' in obs: obs['image'] = tf.cast(obs['image'], dtype) / 255.0 - 0.5 if 'lidar' in obs: obs['lidar'] = tf.cast(obs['lidar'], dtype) / 15.0 - 0.5 if 'lidar_occupancy' in obs: # note: when using `lidar_occupancy` the reconstruction models return a Bernoulli distribution # for this reason, we don't center the observation in 0, but let it in [0, 1] obs['lidar_occupancy'] = tf.cast(obs['lidar_occupancy'], dtype) if 'reward' in obs: clip_rewards = dict(none=lambda x: x, tanh=tf.tanh, clip=lambda x: tf.clip_by_value(x, config.clip_rewards_min, config.clip_rewards_max))[ config.clip_rewards] obs['reward'] = clip_rewards(obs['reward']) return obs def load_dataset(directory, config): episode = next(load_episodes(directory, 1)) types = {k: v.dtype for k, v in episode.items()} shapes = {k: (None,) + v.shape[1:] for k, v in episode.items()} generator = lambda: load_episodes( directory, config.train_steps, config.batch_length, config.dataset_balance) dataset = tf.data.Dataset.from_generator(generator, types, shapes) dataset = dataset.map(functools.partial(preprocess, config=config)) dataset = dataset.batch(config.batch_size, drop_remainder=True) dataset = dataset.prefetch(10) return dataset class SampleDist: def __init__(self, dist, samples=100): self._dist = dist self._samples = samples @property def name(self): return 'SampleDist' def __getattr__(self, name): return getattr(self._dist, name) def mean(self): samples = self._dist.sample(self._samples) return tf.reduce_mean(samples, 0) def mode(self): sample = self._dist.sample(self._samples) logprob = self._dist.log_prob(sample) return tf.gather(sample, tf.argmax(logprob))[0] def entropy(self): sample = self._dist.sample(self._samples) logprob = self.log_prob(sample) return -tf.reduce_mean(logprob, 0) class OneHotDist: def __init__(self, logits=None, probs=None): self._dist = tfd.Categorical(logits=logits, probs=probs) self._num_classes = self.mean().shape[-1] self._dtype = 
prec.global_policy().compute_dtype @property def name(self): return 'OneHotDist' def __getattr__(self, name): return getattr(self._dist, name) def prob(self, events): indices = tf.argmax(events, axis=-1) return self._dist.prob(indices) def log_prob(self, events): indices = tf.argmax(events, axis=-1) return self._dist.log_prob(indices) def mean(self): return self._dist.probs_parameter() def mode(self): return self._one_hot(self._dist.mode()) def sample(self, amount=None): amount = [amount] if amount else [] indices = self._dist.sample(*amount) sample = self._one_hot(indices) probs = self._dist.probs_parameter() sample += tf.cast(probs -
<filename>ZS4Mic/load_functions/prepare_for_zoominterpolation.py
#WORKING
# ZS4Mic: prepare microscopy stacks for ZoomInterpolation.
# Splits 4D/5D TIFF stacks (t, z, y, x[, rgb]) into per-slice PNG folders
# that the interpolation network consumes for z- or t-axis down/upsampling.
import os
import sys
sys.path.insert(0,'/content/ZoomInterpolation/load_functions')
from skimage import io
import numpy as np
from tqdm import tqdm
import shutil
from aicsimageio import AICSImage, imread
import time
import random
# NOTE(review): AICSImage/imread are imported twice; harmless but redundant.
from aicsimageio import AICSImage, imread
from aicsimageio.writers import png_writer
from aicsimageio.writers.ome_tiff_writer import OmeTiffWriter
from timeit import default_timer as timer
import imageio
import tifffile
from aicsimageio.transforms import reshape_data
from datetime import datetime


def downsample_z_creation(img_path_list, file_num, sub_save_location):
    """Write every 2nd z-slice of one stack as PNGs, one folder per timepoint.

    Folder layout: <sub_save_location>/i-<img>_f-<frame>_t-<ttt>/dz_<zzz>.png.
    If the stack has an odd number of slices, the final (odd-indexed) slice is
    additionally written with an "-x" suffix so it is not lost by the stride-2
    downsampling. Changes the process working directory as a side effect.
    """
    os.chdir(sub_save_location)
    t, z, y_dim,x_dim, img, use_RGB = load_img(img_path_list[file_num])
    # folder_steps = str(file_num) + "_steps"
    # image / frame numbers are parsed from the file name
    # (expected pattern: <prefix>-<img###>-<frame##>... ).
    img_nr = img_path_list[file_num].split("/")[-1].split(".")[0].split("-")[1][:3]
    fr_nr = img_path_list[file_num].split("/")[-1].split(".")[0].split("-")[2][:2]
    #create new directory-path
    for num_t in tqdm(range(0,t)):
        folder_name = "i-{}_".format(img_nr) + "f-{}_".format(fr_nr) + "t-%03d"%(num_t)
        os.chdir(sub_save_location)
        folder = os.path.join(sub_save_location,folder_name)
        os.mkdir(folder)
        os.chdir(folder)
        for num_z in range(z):
            # keep only even slices (factor-2 downsampling in z)
            if (num_z % 2) == 0:
                #create new directory-path
                file_name = ("dz_%03d" %(num_z))
                # #here put the image pngs into the folder (instead of creating the folder)
                # #convert image to unit8 otherwise warning
                if use_RGB == False:
                    img_save_1 = img[num_t,num_z, :, :]
                    # grayscale: replicate into 3 identical channels (network expects RGB)
                    img_save_1 = create_3D_image(img_save_1, x_dim, y_dim)
                    # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
                elif use_RGB == True:
                    img_save_1 = img[num_t,num_z, :, :, :]
                    # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
                # # saving images as PNG
                io.imsave("{}.png".format(file_name), img_save_1)
            #save the last slide on top labeled with x
            if num_z == z-1 and (num_z % 2) != 0:
                file_name = ("dz_%03d" %(num_z))
                # #here put the image pngs into the folder (instead of creating the folder)
                # #convert image to unit8 otherwise warning
                if use_RGB == False:
                    img_save_1 = img[num_t,num_z, :, :]
                    img_save_1 = create_3D_image(img_save_1, x_dim, y_dim)
                    # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
                elif use_RGB == True:
                    img_save_1 = img[num_t,num_z, :, :, :]
                    # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
                # # saving images as PNG
                io.imsave("{}-x.png".format(file_name), img_save_1)


def downsample_t_creation(img_path_list, file_num, sub_save_location):
    """Write every 2nd timepoint of one stack as PNGs, one folder per z-slice.

    Mirror of downsample_z_creation with the t and z roles swapped: folders
    are named with the z index, files are dt_<ttt>.png, and an odd final
    timepoint is written with an "-x" suffix.
    """
    os.chdir(sub_save_location)
    t, z, y_dim,x_dim, img, use_RGB = load_img(img_path_list[file_num])
    # folder_steps = str(file_num) + "_steps"
    img_nr = img_path_list[file_num].split("/")[-1].split(".")[0].split("-")[1][:3]
    fr_nr = img_path_list[file_num].split("/")[-1].split(".")[0].split("-")[2][:2]
    #create new directory-path
    for num_z in tqdm(range(0,z)):
        folder_name = "i-{}_".format(img_nr) + "f-{}_".format(fr_nr) + "z-%03d"%(num_z)
        os.chdir(sub_save_location)
        folder = os.path.join(sub_save_location,folder_name)
        os.mkdir(folder)
        os.chdir(folder)
        for num_t in range(t):
            # keep only even timepoints (factor-2 downsampling in t)
            if (num_t % 2) == 0:
                #create new directory-path
                file_name = ("dt_%03d" %(num_t))
                # #here put the image pngs into the folder (instead of creating the folder)
                # #convert image to unit8 otherwise warning
                if use_RGB == False:
                    img_save_1 = img[num_t,num_z, :, :]
                    img_save_1 = create_3D_image(img_save_1, x_dim, y_dim)
                    # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
                elif use_RGB == True:
                    img_save_1 = img[num_t,num_z, :, :, :]
                    # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
                # # saving images as PNG
                io.imsave("{}.png".format(file_name), img_save_1)
            #save the last slide on top labeled with x
            if num_t == t-1 and (num_t % 2) != 0:
                file_name = ("dt_%03d" %(num_t))
                if use_RGB == False:
                    # #here put the image pngs into the folder (instead of creating the folder)
                    # #convert image to unit8 otherwise warning
                    img_save_1 = img[num_t,num_z, :, :]
                    img_save_1 = create_3D_image(img_save_1, x_dim, y_dim)
                    # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
                elif use_RGB == True:
                    img_save_1 = img[num_t,num_z, :, :, :]
                    # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
                # # saving images as PNG
                io.imsave("{}-x.png".format(file_name), img_save_1)


def upsample_t_creation(img_path_list, file_num, sub_save_location, folder_option):
    """Write ALL timepoints of one stack as PNGs, one folder per z-slice.

    folder_option selects the file-name marker: "zoom" -> "zt_###",
    anything else -> "ut_###", to distinguish zoom interpolation from plain
    temporal upsampling runs.
    """
    # to differentiate between zoom and normal upsampling in t dim
    if folder_option =="zoom":
        marker = "z"
    else:
        marker = "u"
    os.chdir(sub_save_location)
    t, z, y_dim,x_dim, img, use_RGB = load_img(img_path_list[file_num])
    # folder_steps = str(file_num) + "_steps"
    img_path = img_path_list[file_num]
    img_nr = img_path.split("/")[-1].split(".")[0].split("-")[1][:3]
    fr_nr = img_path.split("/")[-1].split(".")[0].split("-")[2][:2]
    #create new directory-path
    for num_z in tqdm(range(0,t)):  # dim_2 = zdimension
        folder_name = "i-{}_".format(img_nr) + "f-{}_".format(fr_nr) + "z-%03d"%(num_z)  # z doesn't need to be the z dimension because it is also used for the t dimension
        os.chdir(sub_save_location)
        folder = os.path.join(sub_save_location,folder_name)
        # NOTE(review): `folder` is computed but never used; mkdir/chdir use the
        # relative `folder_name` — works only because cwd is sub_save_location.
        os.mkdir(folder_name)
        os.chdir(folder_name)
        for num_t in range(t):
            #create new directory-path
            file_name = (f"{marker}t_%03d" %(num_t))
            # #here put the image pngs into the folder (instead of creating the folder)
            # #convert image to unit8 otherwise warning
            if use_RGB == False:
                img_save_1 = img[num_t,num_z, :, :]
                img_save_1 = create_3D_image(img_save_1, x_dim, y_dim)
                # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
            elif use_RGB == True:
                img_save_1 = img[num_t,num_z, :, :, :]
                # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
            # # saving images as PNG
            io.imsave("{}.png".format(file_name), img_save_1)
            # writer1.save(img_save_1)


def upsample_z_creation(img_path_list, file_num, sub_save_location):
    """Write ALL z-slices of one stack as PNGs, one folder per timepoint.

    Files are named uz_<zzz>.png; folders reuse the "z-###" label but are
    indexed by timepoint (see NOTE below).
    """
    os.chdir(sub_save_location)
    t, z, y_dim,x_dim, img, use_RGB = load_img(img_path_list[file_num])  #dim_1=t, dim_2=z
    # folder_steps = str(file_num) + "_steps"
    img_nr = img_path_list[file_num].split("/")[-1].split(".")[0].split("-")[1][:3]
    fr_nr = img_path_list[file_num].split("/")[-1].split(".")[0].split("-")[2][:2]
    # folder_file_path = os.path.join(sub_save_location,file_to_folder_name)
    # os.mkdir(folder_file_path)
    #create new directory-path
    for num_t in tqdm(range(0,t)):
        # NOTE(review): the folder label is "z-%03d" but the index is the
        # timepoint num_t — presumably intentional so the consumer sees the
        # same folder pattern in both axes; confirm against the downstream code.
        folder_name = "i-{}_".format(img_nr) + "f-{}_".format(fr_nr) + "z-%03d"%(num_t)
        os.chdir(sub_save_location)
        folder = os.path.join(sub_save_location,folder_name)
        os.mkdir(folder_name)
        os.chdir(folder_name)
        for num_z in range(z):
            #create new directory-path
            file_name = ("uz_%03d"%(num_z))
            # #convert image to unit8 otherwise warning
            if use_RGB == False:
                img_save_1 = img[num_t,num_z, :, :]
                img_save_1 = create_3D_image(img_save_1, x_dim, y_dim)
                # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
            elif use_RGB == True:
                img_save_1 = img[num_t,num_z, :, :, :]
                # img_save_1 = convert(img_save_1, 0, 255, np.uint8)
            # # saving images as PNG
            io.imsave("{}.png".format(file_name), img_save_1)


def get_img_path_list(img_path_list, img_folder_path):
    ''' Creates a list of image-path that will be used for loading the images later'''
    # Appends to (and returns) the caller-supplied list; entries are sorted
    # by file name.
    flist = os.listdir(img_folder_path)
    flist.sort()
    for i in flist:
        img_slice_path = os.path.join(img_folder_path, i)
        img_path_list.append(img_slice_path)
    return img_path_list

# img_path_list = get_img_path_list_T(img_path_list, filepath, folder_list)
# img_path_list


def load_img(img_path):
    """Load a TIFF stack and return (t, z, y_dim, x_dim, img, use_RGB).

    RGB is detected by a trailing axis of length 3.
    NOTE(review): a grayscale stack whose last axis happens to be 3 would be
    misclassified as RGB — verify this cannot occur for the pipeline's data.
    """
    img = io.imread(img_path)
    if img.shape[-1]==3:
        use_RGB = True
        t, z, y_dim, x_dim, _ = img.shape
        print("This image will be processed as a RGB image")
    else:
        use_RGB = False
        t, z, y_dim, x_dim = img.shape
    print("The image dimensions are: " + str(img.shape))
    return t, z, y_dim,x_dim, img, use_RGB


def make_folder_with_date(save_location, name):
    """Create <save_location>/<YYYYMMDD>_<HHMMSS>_<name> and return its path."""
    today = datetime.now()
    # NOTE(review): `h` is computed but never used below.
    if today.hour < 12:
        h = "00"
    else:
        h = "12"
    sub_save_location = save_location + "/" + today.strftime('%Y%m%d')+ "_"+ today.strftime('%H%M%S')+ "_%s"%name
    os.mkdir(sub_save_location)
    return sub_save_location


def create_3D_image(img, x_dim, y_dim):
    # creates 3D image with 3 times the same values for RGB because the NN was generated for normal rgb images dim(3,x,y)
    # print(img.shape)
    image_3D = np.zeros((y_dim,x_dim,3), dtype=np.uint8)
    image_3D[:,:,0] = img
    image_3D[:,:,1] = img
    image_3D[:,:,2] = img
    return image_3D

# def convert(img, target_type_min, target_type_max, target_type):
#     # this function converts images from float32 to unit8
#     imin = img.min()
#     imax = img.max()
#     a = (target_type_max - target_type_min) / (imax - imin)
#     b = target_type_max - a * imax
#     new_img = (a * img + b).astype(target_type)
#     return new_img


#prepare_split_aug_images.py
#UPDATED CHECK
# NOTE(review): a second script was concatenated below this point. It
# re-imports several modules and redefines make_folder_with_date (the later
# definition wins when both scripts live in one module).
from skimage import io
import numpy as np
from tqdm import tqdm
import shutil
import os
from aicsimageio import AICSImage, imread
import shutil
import time
import numpy
import random
from aicsimageio import AICSImage, imread
from aicsimageio.writers import png_writer
from tqdm import tqdm
from google.colab.patches import cv2_imshow
from aicsimageio.writers.ome_tiff_writer import OmeTiffWriter
from tqdm import tqdm
from timeit import default_timer as timer
import imageio
import tifffile
from aicsimageio.transforms import reshape_data
from datetime import datetime
from tempfile import mkstemp
from shutil import move, copymode
from os import fdopen, remove


class bcolors:
    # ANSI escape sequences for coloured terminal output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def make_folder_with_date(save_location, name):
    """Create <save_location>/<YYYYMMDDHH>_<HHMMSS>_<name> and return its path.

    NOTE(review): shadows the earlier definition above; this variant appends
    the hour to the date part. `h` is again computed but unused.
    """
    today = datetime.now()
    if today.hour < 12:
        h = "00"
    else:
        h = "12"
    sub_save_location = save_location + "/" + today.strftime('%Y%m%d%H')+ "_"+ today.strftime('%H%M%S')+ "_%s"%name
    os.mkdir(sub_save_location)
    return sub_save_location


def diplay_img_info(img, divisor, use_RGB):
    """Print stack dimensions and return (z, channels, t, x, y, x//div, y//div)."""
    ### display image data
    nr_z_slices = img.shape[1]
    nr_timepoints = img.shape[0]
    x_dim = img.shape[-2]
    # NOTE(review): y_dim duplicates x_dim (both img.shape[-2]); for RGB input
    # shape[-1] is the channel axis, so this may only be correct for square
    # images — confirm intent before changing.
    y_dim = img.shape[-2]
    x_div = x_dim//divisor
    y_div = y_dim//divisor
    print(img.shape)
    print("The Resolution is: " + str(x_dim))
    print("The number of z-slizes is: " + str(nr_z_slices))
    print("The number of timepoints: " + str(nr_timepoints))
    if use_RGB:
        nr_channels = img.shape[-1]
        print("The number of channels: " + str(nr_channels))
        # channel count is only reported; downstream treats the stack as 1-channel
        nr_channels = 1
    else:
        nr_channels = 1
    return nr_z_slices, nr_channels, nr_timepoints, x_dim, y_dim, x_div, y_div


def correct_channels(img):
    '''For 2D + T (with or without RGB) a artificial z channel gets created'''
    # NOTE(review): the unpack order (t, x, y) vs. the zeros layout (t, 1, y, x)
    # only lines up when x == y; verify behaviour for non-square frames.
    if img.shape[-1] ==3:
        use_RGB = True
    else:
        use_RGB = False
    if len(img.shape) ==4 and use_RGB:
        t, x, y, c = img.shape
        zeros = np.zeros((t,1,y,x,c), dtype=np.uint8)
        zeros[:,0,:,:,:] = img
        img = zeros
    elif len(img.shape) ==3 and not use_RGB:
        t, x, y = img.shape
        zeros = np.zeros((t,1,y,x), dtype=np.uint8)
        zeros[:,0,:,:] = img
        img = zeros
    return img, use_RGB


def change_train_file(zoomfactor, model_path):
    """This function changes the resolution value in the file: Vimeo7_dataset.py"""
    # NOTE(review): this definition is truncated in this chunk — it is cut off
    # mid-write below; the remainder of the function is not visible here.
    file_path_2 = "/content/ZoomInterpolation/codes/test_new.py"
    fh_2, abs_path_2 = mkstemp()
    with fdopen(fh_2,'w') as new_file:
        with open(file_path_2) as old_file:
            for counter, line in enumerate(old_file):
                if counter ==27:
                    new_file.write(f" scale =
# NOTE(review): this chunk opens mid-way through a collect-logs-start test;
# the first line below is the tail of a truncated `if nodes > ...` condition.
0 or invalid_node is not None:
            log_nodes = list()
            for idx in range(nodes):
                log_nodes.append("%s:%s" % (self.servers[idx + 1].ip,
                                            self.servers[idx + 1].port))
            if invalid_node is not None:
                log_nodes.append("invalid:8091")
            log_nodes = ",".join(log_nodes)
        if initialized:
            # Bring the ejected node back to an initialized single-node cluster.
            cli = CouchbaseCLI(server, server.rest_username, server.rest_password)
            _, _, success = cli.cluster_init(256, 256, None, "data", None, None,
                                             server.rest_username,
                                             server.rest_password, None)
            self.assertTrue(success, "Cluster initialization failed during test setup")
            if init_num_servers > 1:
                time.sleep(5)
                _, _, errored = cli.server_add(servers_to_add, server.rest_username,
                                               server.rest_password, None, None, None)
                self.assertTrue(errored, "Could not add initial servers")
                _, _, errored = cli.rebalance(None)
                self.assertTrue(errored, "Unable to complete initial rebalance")
        time.sleep(5)
        cli = CouchbaseCLI(server, username, password)
        stdout, _, errored = cli.collect_logs_start(all_nodes, log_nodes, upload,
                                                    upload_host, upload_customer,
                                                    upload_ticket)
        if not expect_error:
            self.assertTrue(errored, "Expected command to succeed")
        else:
            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg),
                            "Expected error message not found")

    def testCollectLogStop(self):
        """Run `collect-logs-stop` via the CLI and check for success or the
        configured error message."""
        username = self.input.param("username", None)
        password = self.input.param("password", None)
        initialized = self.input.param("initialized", True)
        expect_error = self.input.param("expect-error")
        error_msg = self.input.param("error-msg", "")
        server = copy.deepcopy(self.servers[0])
        rest = RestConnection(server)
        rest.force_eject_node()
        if initialized:
            cli = CouchbaseCLI(server, server.rest_username, server.rest_password)
            _, _, success = cli.cluster_init(256, 256, None, "data", None, None,
                                             server.rest_username,
                                             server.rest_password, None)
            self.assertTrue(success, "Cluster initialization failed during test setup")
        time.sleep(5)
        cli = CouchbaseCLI(server, username, password)
        stdout, _, errored = cli.collect_logs_stop()
        if not expect_error:
            self.assertTrue(errored, "Expected command to succeed")
        else:
            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg),
                            "Expected error message not found")

    def test_mctimings_with_data_monitoring_role(self):
        """ This role only works from 5.1 and later
            params: sasl_buckets=2,default_bucket=False,nodes_init=2,
                    permission=self_bucket
            if permission=other_bucket, need to add should-fail=True
        """
        if 5.1 > float(self.cb_version[:3]):
            self.log.info("This test only work for version 5.1+")
            return
        if len(self.buckets) < 2:
            self.fail("This test requires minimum of 2 buckets")
        permission = self.input.param("permission", "all")
        username = "data_monitoring"
        bucket_names = []
        bucket_name = ""
        rest = RestConnection(self.master)
        shell = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            bucket_names.append(bucket.name)
        # Choose which bucket the user is granted on, and which one we query.
        if permission == "all":
            role = '*'
            bucket_name = bucket_names[random.randint(0, 1)]
        elif permission == "self_bucket":
            role = "{0}".format(bucket_names[0])
            bucket_name = bucket_names[0]
        elif permission == "other_bucket":
            role = "{0}".format(bucket_names[1])
            bucket_name = bucket_names[0]
        testuser = [{"id": username, "name": username, "password": "password"}]
        rolelist = [{"id": username, "name": username,
                     "roles": "data_monitoring[{0}]".format(role)}]
        kv_gen = BlobGenerator('create', 'create', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, kv_gen, "create", self.expire_time,
                               flag=self.item_flag)
        try:
            status = self.add_built_in_server_user(testuser, rolelist)
            if not status:
                self.fail("Failed to add user: {0} with role: {1} "\
                          .format(username, role))
            cmd = self.cli_command_path + "mctimings" + self.cmd_ext
            cmd += " -h " + self.master.ip + ":11210 -u " + username
            cmd += " -P password -b " + bucket_name + " --verbose "
            output, _ = shell.execute_command(cmd)
            if not self.should_fail:
                self.assertTrue(self._check_output("The following data is collected",
                                                   output))
            else:
                if self._check_output("The following data is collected", output):
                    self.fail("This user should not allow to monitor data in this bucket {0}"\
                              .format(bucket_name))
                else:
                    self.log.info("Alright, user bucket A has no permission to check bucket B")
        except Exception as e:
            print(e)
        finally:
            shell.disconnect()
            if status:
                self.log.info("Remove user {0}".format(rolelist))
                RbacBase().remove_user_role(["data_monitoring"], rest)

    def test_cmd_set_stats(self):
        """ When set any items, cmd_set should increase counting number.
            params: default_bucket=False,sasl_buckets=1
            /opt/couchbase/bin/cbstats localhost:11210 all -u Administrator -p password -b bucket0 | grep cmd_set
            cmd_set: 10011
        """
        shell = RemoteMachineShellConnection(self.master)
        cmd = self.cli_command_path + "cbstats" + self.cmd_ext + " "
        cmd += self.master.ip + ":11210 all -u Administrator -p password "
        cmd += "-b bucket0 | grep cmd_set"
        # Empty bucket: cmd_set counter starts at 0.
        output, _ = shell.execute_command(cmd)
        self.assertTrue(self._check_output("0", output))
        kv_gen = BlobGenerator('create', 'create', self.value_size, end=1000)
        self._load_all_buckets(self.master, kv_gen, "create", self.expire_time,
                               flag=self.item_flag)
        # After loading 1000 items the counter should read 1000.
        output, _ = shell.execute_command(cmd)
        self.assertTrue(self._check_output("1000", output))
        shell.disconnect()

    def testNodeInit(self):
        """Run `node-init` to set data/index paths and hostname, then verify
        the node settings (or the expected error message)."""
        username = self.input.param("username", None)
        # NOTE(review): "<PASSWORD>" below is redaction residue in the source;
        # the original expression was presumably self.input.param(...).
        password = <PASSWORD>.param("password", None)
        data_path = self.input.param("data-path", None)
        index_path = self.input.param("index-path", None)
        hostname = self.input.param("hostname", None)
        initialized = self.input.param("initialized", False)
        expect_error = self.input.param("expect-error")
        error_msg = self.input.param("error-msg", "")
        server = copy.deepcopy(self.servers[0])
        rest = RestConnection(server)
        rest.force_eject_node()
        node_settings = rest.get_nodes_self()
        # Normalize paths for Windows test machines.
        if self.os == "windows":
            self.log_path = self.log_path.replace("/cygdrive/c/", "c:/")
        if data_path is not None:
            if data_path == "valid":
                data_path = self.log_path
            elif self.os == "windows" and data_path[:1] == "/":
                data_path = "c:" + data_path
        if index_path is not None:
            if index_path == "valid":
                index_path = self.log_path
            elif self.os == "windows" and index_path[:1] == "/":
                index_path = "c:" + index_path
        if initialized:
            cli = CouchbaseCLI(server, server.rest_username, server.rest_password)
            _, _, success = cli.cluster_init(256, 256, None, "data", None, None,
                                             server.rest_username,
                                             server.rest_password, None)
            self.assertTrue(success, "Cluster initialization failed during test setup")
        time.sleep(5)
        cli = CouchbaseCLI(server, username, password)
        stdout, _, errored = cli.node_init(data_path, index_path, hostname)
        if not expect_error:
            self.assertTrue(errored, "Expected command to succeed")
            # Fall back to the node's current settings for params we didn't set.
            if data_path is None:
                data_path = node_settings.storage[0].path
            elif self.os == "windows":
                data_path = data_path.replace("\\", "")[:-1]
            if index_path is None:
                index_path = node_settings.storage[0].index_path
            elif self.os == "windows":
                index_path = index_path.replace("\\", "")[:-1]
            self.assertTrue(self.verify_node_settings(server, data_path, index_path,
                                                      hostname),
                            "Node settings not changed")
        elif self.os != "windows":
            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg),
                            "Expected error message not found")

    def testGroupManage(self):
        """Exercise the `group-manage` CLI: create/delete/list server groups,
        move servers between groups, and rename groups."""
        username = self.input.param("username", None)
        password = self.input.param("password", None)
        create = self.input.param("create", None)
        delete = self.input.param("delete", None)
        # NOTE(review): `list` shadows the builtin within this method.
        list = self.input.param("list", None)
        move = self.input.param("move-servers", 0)
        rename = self.input.param("rename", None)
        name = self.input.param("name", None)
        from_group = self.input.param("from-group", None)
        to_group = self.input.param("to-group", None)
        initialized = self.input.param("initialized", True)
        expect_error = self.input.param("expect-error")
        error_msg = self.input.param("error-msg", "")
        init_group = self.input.param("init-group", None)
        init_num_servers = self.input.param("init-num-servers", 1)
        invalid_move_server = self.input.param("invalid-move-server", None)
        server = copy.deepcopy(self.servers[0])
        rest = RestConnection(server)
        rest.force_eject_node()
        to_move = None
        if move > 0:
            to_move = []
            for idx in range(move):
                to_move.append("%s:%s" % (self.servers[idx].ip, self.servers[idx].port))
            to_move = ",".join(to_move)
        if invalid_move_server:
            to_move = invalid_move_server
        servers_to_add = []
        for idx in range(init_num_servers-1):
            servers_to_add.append("{0}"\
                .format(self._convert_server_to_url(self.servers[idx + 1])))
        servers_to_add = ",".join(servers_to_add)
        if initialized:
            cli = CouchbaseCLI(server, server.rest_username, server.rest_password)
            _, _, success = cli.cluster_init(256, 256, None, "data", None, None,
                                             server.rest_username,
                                             server.rest_password, None)
            self.assertTrue(success, "Cluster initialization failed during test setup")
            if init_num_servers > 1:
                time.sleep(5)
                _, _, errored = cli.server_add(servers_to_add, server.rest_username,
                                               server.rest_password, None, None, None)
                self.assertTrue(errored, "Could not add initial servers")
                _, _, errored = cli.rebalance(None)
                self.assertTrue(errored, "Unable to complete initial rebalance")
            if init_group is not None:
                time.sleep(5)
                _, _, errored = cli.group_manage(True, False, False, None, None,
                                                 init_group, None, None)
        time.sleep(5)
        cli = CouchbaseCLI(server, username, password)
        stdout, _, errored = cli.group_manage(create, delete, list, to_move,
                                              rename, name, to_group, from_group)
        if not expect_error:
            self.assertTrue(errored, "Expected command to succeed")
            if create:
                self.assertTrue(self.verifyGroupExists(server, name),
                                "Group doesn't exist")
            elif delete:
                self.assertTrue(not self.verifyGroupExists(server, name),
                                "Group doesn't exist")
            elif rename:
                self.assertTrue(self.verifyGroupExists(server, rename),
                                "Group not renamed")
            elif move > 0:
                # Move the servers back so later tests start from a clean state.
                _, _, errored = cli.group_manage(False, False, False, to_move, None,
                                                 None, from_group, to_group)
                self.assertTrue(errored, "Group reset failed")
        else:
            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg),
                            "Expected error message not found")

    def testRecovery(self):
        """Fail over servers, then run the `recovery` CLI with the given
        recovery type and verify the servers' recovery state."""
        username = self.input.param("username", None)
        password = self.input.param("password", None)
        servers = self.input.param("servers", 0)
        recovery_type = self.input.param("recovery-type", None)
        initialized = self.input.param("initialized", True)
        expect_error = self.input.param("expect-error")
        error_msg = self.input.param("error-msg", "")
        skip_failover = self.input.param("skip-failover", False)
        init_num_servers = self.input.param("init-num-servers", 1)
        invalid_recover_server = self.input.param("invalid-recover-server", None)
        server = copy.deepcopy(self.servers[0])
        rest = RestConnection(server)
        rest.force_eject_node()
        servers_to_recover = None
        if servers > 0:
            servers_to_recover = []
            for idx in range(servers):
                servers_to_recover.append("{0}:{1}".format(self.servers[idx+1].ip,
                                                           self.servers[idx+1].port))
            servers_to_recover = ",".join(servers_to_recover)
        if invalid_recover_server:
            servers_to_recover = invalid_recover_server
        servers_to_add = []
        for idx in range(init_num_servers - 1):
            server_add = "{0}".format(self._convert_server_to_url(self.servers[idx + 1]))
            servers_to_add.append(server_add)
        servers_to_add = ",".join(servers_to_add)
        if initialized:
            cli = CouchbaseCLI(server, server.rest_username, server.rest_password)
            _, _, success = cli.cluster_init(256, 256, None, "data", None, None,
                                             server.rest_username,
                                             server.rest_password, None)
            self.assertTrue(success, "Cluster initialization failed during test setup")
            if init_num_servers > 1:
                time.sleep(5)
                _, _, errored = cli.server_add(servers_to_add, server.rest_username,
                                               server.rest_password, None, None, None)
                self.assertTrue(errored, "Could not add initial servers")
                _, _, errored = cli.rebalance(None)
                self.assertTrue(errored, "Unable to complete initial rebalance")
            if servers_to_recover and not skip_failover:
                # Servers must be failed over before they can be recovered.
                for restore_server in servers_to_recover.split(","):
                    _, _, errored = cli.failover(restore_server, True)
                    self.assertTrue(errored, "Unable to failover servers")
        time.sleep(5)
        cli = CouchbaseCLI(server, username, password)
        stdout, _, errored = cli.recovery(servers_to_recover, recovery_type)
        if not expect_error:
            self.assertTrue(errored, "Expected command to succeed")
            self.assertTrue(self.verifyRecoveryType(server, servers_to_recover,
                                                    recovery_type),
                            "Servers not recovered")
        else:
            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg),
                            "Expected error message not found")

    def testServerReadd(self):
        """Re-add failed-over servers via the CLI.
        NOTE(review): this method is truncated at the end of this chunk."""
        username = self.input.param("username", None)
        password = self.input.param("password", None)
        servers = self.input.param("servers", 0)
        initialized = self.input.param("initialized", True)
        expect_error = self.input.param("expect-error")
        error_msg = self.input.param("error-msg", "")
        skip_failover = self.input.param("skip-failover", False)
        init_num_servers = self.input.param("init-num-servers", 1)
        invalid_recover_server = self.input.param("invalid-recover-server", None)
        server = copy.deepcopy(self.servers[0])
        if len(self.servers) < 4:
            mesg = "***\n Sever readd tests need minimum 4 servers to run\n***"
            RemoteMachineShellConnection(server).stop_current_python_running(mesg)
        rest = RestConnection(server)
        rest.force_eject_node()
        servers_to_recover = None
        if servers > 0:
            servers_to_recover = []
            for idx in range(servers):
                servers_to_recover.append("%s:%s" % (self.servers[idx + 1].ip,
                                                     self.servers[idx + 1].port))
            servers_to_recover
<reponame>jbasko/aws-sfn-builder
# States-Language model for AWS Step Functions: parse raw definitions into
# State dataclasses, compile them back to dictionaries/JSON, and provide a
# lightweight local execute()/dry_run() for testing.
import json
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from uuid import uuid4

import dataclasses
from bidict import bidict
from jsonpath_ng import parse as parse_jsonpath

from .base import Node
from .choice_rules import ChoiceRule


def _generate_name():
    # Random default name for states created without an explicit one.
    return str(uuid4())


class States:
    """
    Namespace for all names of states.
    """
    Pass = "Pass"
    Task = "Task"
    Choice = "Choice"
    Wait = "Wait"
    Succeed = "Succeed"
    Fail = "Fail"
    Parallel = "Parallel"
    Sequence = "Sequence"
    Machine = "Machine"

    ALL = [
        Pass,
        Task,
        Choice,
        Wait,
        Succeed,
        Fail,
        Parallel,
        Sequence,
        Machine,
    ]

    _TERMINAL = [
        Succeed,
        Fail,
        # + any End State
    ]

    # "States" used internally by this library that are not part of the
    # States Language itself.
    _INTERNAL = [
        Sequence,
        Machine,
    ]

    @classmethod
    def is_terminal(cls, state: "State"):
        # A state with no successor, or a Succeed/Fail state, ends the branch.
        return state.next is None or state.type in cls._TERMINAL

    @classmethod
    def is_internal(cls, state: "State"):
        return state.type in cls._INTERNAL


@dataclasses.dataclass
class State(Node):
    # Bidirectional mapping between our snake_case attribute names and the
    # States-Language PascalCase field names.
    _FIELDS = bidict(
        **Node._FIELDS,
        **{
            "type": "Type",
            "comment": "Comment",
            "next": "Next",
            "end": "End",
            "resource": "Resource",
            "input_path": "InputPath",
            "output_path": "OutputPath",
            "result_path": "ResultPath",
        },
    )

    # Our fields are not part of States Language and therefore
    # should not be included in the compiled definitions, but
    # are accepted in the input.
    _OUR_FIELDS = bidict({
        "name": "Name",
    })

    obj: Any = None  # TODO Rename it to raw_obj
    name: str = dataclasses.field(default_factory=_generate_name)
    type: Type = None
    comment: str = None
    next: str = None
    end: bool = None
    resource: str = None
    input_path: str = None
    output_path: str = None
    result_path: str = None

    @classmethod
    def parse(cls, raw: Any, **fields) -> "State":
        # Dictionary with no type defaults to Task which is most likely state
        # that user wants to instantiate.
        if not isinstance(raw, State):
            fields.setdefault("type", States.Task)
        return super().parse(raw, **fields)

    def compile(self, **compile_options) -> Dict:
        """Compile this state to a States-Language dictionary."""
        c = super().compile(**compile_options)

        # Do not include "Type" for our internal "states" such as "Machine" or "Sequence".
        if States.is_internal(self) and "Type" in c:
            del c["Type"]

        # TODO Rethink this feature.
        if hasattr(self.obj, 'get_state_attrs'):
            c.update(getattr(self.obj, 'get_state_attrs')(state=self))

        state_visitor = compile_options.get('state_visitor')
        if state_visitor is not None:
            state_visitor(self, c)

        return c

    def format_state_input(self, input):
        """
        Applies InputPath
        """
        if self.input_path:
            return parse_jsonpath(self.input_path).find(input)[0].value
        return input

    def format_result(self, input, resource_result):
        """
        Applies ResultPath
        """
        if self.result_path:
            result_path = parse_jsonpath(self.result_path)
            if not result_path.find(input):
                # A quick hack to set a non-existent key (assuming the parent of the path is a dictionary).
                result_path.left.find(input)[0].value[str(result_path.right)] = resource_result
                return input
            elif str(result_path) == "$":
                return resource_result
            else:
                result_path.update(input, resource_result)
                return input
        return resource_result

    def format_state_output(self, result):
        """
        Applies OutputPath
        """
        if not self.output_path:
            return result
        output_path = parse_jsonpath(self.output_path)
        if str(output_path) == "$":
            # From docs:
            # If the OutputPath has the default value of $, this matches the entire input completely.
            # In this case, the entire input is passed to the next state.
            return result
        else:
            output_matches = output_path.find(result)
            if output_matches:
                # From docs:
                # If the OutputPath matches an item in the state's input, only that input item is selected.
                # This input item becomes the state's output.
                assert len(output_matches) == 1
                return output_matches[0].value
            else:
                # From docs:
                # If the OutputPath doesn't match an item in the state's input,
                # an exception specifies an invalid path.
                raise NotImplementedError()

    def execute(self, input, resource_resolver: Callable=None) -> Tuple[Optional[str], Any]:
        """Locally run the state's resource; return (next state name, output)."""
        resource_input = self.format_state_input(input)
        resource_result = resource_resolver(self.resource)(resource_input)
        result = self.format_result(input, resource_result)
        return self.next, self.format_state_output(result)

    def dry_run(self, trace: List):
        # Record the visit and hand control to the next state.
        trace.append(self.name)
        return self.next


@dataclasses.dataclass
class Pass(State):
    _FIELDS = bidict(
        **State._FIELDS,
        **{
            "result": "Result",
        },
    )

    type: str = States.Pass
    result: str = None
    result_path: str = None

    def compile_dict(self, c: Dict):
        # A Pass state with no successor must be marked as an End state.
        if self.next is None:
            c["End"] = True


@dataclasses.dataclass
class Task(Pass):
    # Inherits from Pass because it has almost all of the same fields + Retry & Catch
    _FIELDS = bidict(
        **Pass._FIELDS,
        **{
            "retry": "Retry",
            "catch": "Catch",
            "timeout_seconds": "TimeoutSeconds",
            "heartbeat_seconds": "HeartbeatSeconds",
        },
    )

    type: str = States.Task
    retry: List = None
    catch: List = None
    timeout_seconds: int = None
    heartbeat_seconds: int = None


@dataclasses.dataclass
class Choice(State):
    _FIELDS = bidict(
        **State._FIELDS,
        **{
            "choices": "Choices",
            "default": "Default",
        },
    )

    type: str = States.Choice
    choices: List[ChoiceRule] = dataclasses.field(default_factory=list)
    default: str = None

    @classmethod
    def parse_dict(cls, d: Dict, fields: Dict) -> None:
        fields["choices"] = [ChoiceRule.parse(raw_choice_rule) for raw_choice_rule in d["Choices"]]

    def execute(self, input, resource_resolver: Callable):
        # First matching rule wins; fall back to the Default transition.
        for choice_rule in self.choices:
            if choice_rule.matches(input):
                return choice_rule.next, input
        return self.default, input


@dataclasses.dataclass
class Wait(State):
    _FIELDS = bidict(
        **State._FIELDS,
        **{
            "seconds": "Seconds",
            "seconds_path": "SecondsPath",
            "timestamp": "Timestamp",
            "timestamp_path": "TimestampPath",
        },
    )

    type: str = States.Wait
    seconds: int = None
    seconds_path: str = None
    timestamp: str = None
    timestamp_path: str = None

    def compile_dict(self, c: Dict):
        if self.next is None:
            c["End"] = True

    def execute(self, input, resource_resolver: Callable=None):
        # TODO We don't actually do any waiting here, but perhaps we could delegate it to some predefined resource.
        state_input = self.format_state_input(input)
        state_output = self.format_state_output(state_input)
        return self.next, state_output


@dataclasses.dataclass
class Fail(State):
    _FIELDS = bidict(
        **State._FIELDS,
        **{
            "cause": "Cause",
            "error": "Error",
        },
    )

    type: str = States.Fail
    cause: str = None
    error: str = None

    def execute(self, input, resource_resolver: Callable=None):
        # TODO No idea what should we do here.
        return None, None


@dataclasses.dataclass
class Succeed(State):
    type: str = States.Succeed


@dataclasses.dataclass
class Parallel(Task):
    _FIELDS = bidict(
        **Task._FIELDS,
        **{
            "branches": "Branches",
        },
    )

    type: str = States.Parallel
    branches: List["Sequence"] = dataclasses.field(default_factory=list)

    @classmethod
    def parse_list(cls, raw: List, **fields) -> "Parallel":
        # Each inner list becomes one parallel branch (a Sequence).
        assert isinstance(raw, List)
        return cls(
            branches=[Sequence.parse_list(raw_branch) for raw_branch in raw],
            **fields,
        )

    @classmethod
    def parse_dict(cls, d: Dict, fields: Dict) -> None:
        fields["branches"] = [State.parse(raw_branch, type="Sequence") for raw_branch in d["Branches"]]

    def compile_dict(self, c: Dict):
        if self.next is None:
            c["End"] = True

    def dry_run(self, trace: List):
        # Branch traces are appended as a nested list to show parallelism.
        parallel_trace = []
        for branch in self.branches:
            branch_trace = []
            branch.dry_run(branch_trace)
            parallel_trace.append(branch_trace)
        trace.append(parallel_trace)
        return self.next


@dataclasses.dataclass
class Sequence(State):
    _FIELDS = bidict(
        **State._FIELDS,
        **{
            "start_at": "StartAt",
            "states": "States",
        },
    )

    type: str = States.Sequence
    start_at: str = None
    states: Dict[str, State] = dataclasses.field(default_factory=dict)

    @property
    def start_at_state(self) -> State:
        return self.states[self.start_at]

    @classmethod
    def parse_list(cls, raw: List, **fields) -> "State":
        """Parse a shorthand list of states; a list of lists parses as Parallel."""
        if not isinstance(raw, list):
            raise TypeError(raw)
        if raw and all(isinstance(item, list) for item in raw):
            assert not fields
            return Parallel.parse_list(raw)
        else:
            states = []
            for raw_state in raw:
                if isinstance(raw_state, list):
                    states.append(Sequence.parse_list(raw_state))
                else:
                    states.append(Task.parse(raw_state))
            # Chain the states in list order.
            for i, state in enumerate(states[:-1]):
                state.next = states[i + 1].name
            return cls(
                start_at=states[0].name if states else None,
                states={s.name: s for s in states},
                **fields,
            )

    @classmethod
    def parse_dict(cls, d: Dict, fields: Dict) -> None:
        fields["states"] = {k: State.parse(v, name=k) for k, v in d["States"].items()}

    def dry_run(self, trace):
        # Walk the chain from StartAt until a state with no successor.
        state = self.states[self.start_at]
        while state is not None:
            state = self.states.get(state.dry_run(trace))
        return self.next

    def insert(self, raw, before: str=None, after: str=None):
        """Insert a new state before or after the named existing state,
        rewiring StartAt and all Next pointers accordingly."""
        new_state = State.parse(raw)
        if before:
            assert not after
            assert new_state.name != before
            inserted = False
            if self.start_at == before:
                self.start_at = new_state.name
                inserted = True
            for state in self.states.values():
                if state.next == before:
                    state.next = new_state.name
                    inserted = True
            if not inserted:
                raise ValueError(before)
            new_state.next = before
            self.states[new_state.name] = new_state
        elif after:
            assert not before
            assert new_state.name != after
            new_state.next = self.states[after].next
            self.states[after].next = new_state.name
            self.states[new_state.name] = new_state
        else:
            raise NotImplementedError()

    def remove(self, name: str):
        """Remove the named state, splicing its predecessors to its successor."""
        removed_state = self.states[name]
        for state in self.states.values():
            if state.next == name:
                state.next = removed_state.next
        if self.start_at == name:
            self.start_at = removed_state.next
        del self.states[name]

    def append(self, raw):
        """Append a state after every current terminal state of the sequence."""
        new_state = State.parse(raw)
        if not self.states:
            self.states[new_state.name] = new_state
            self.start_at = new_state.name
            return
        terminal_states = [s for s in self.states.values() if not s.next]
        if not terminal_states:
            raise ValueError("Sequence has no terminal state, cannot append reliably")
        self.states[new_state.name] = new_state
        # There can be more than one terminal state.
        for s in terminal_states:
            s.next = new_state.name


@dataclasses.dataclass
class Machine(Sequence):
    _FIELDS = bidict(
        **Sequence._FIELDS,
        **{
            "version": "Version",
            "timeout_seconds": "TimeoutSeconds",
        },
    )

    type: str = States.Machine
    timeout_seconds: int = None
    version: str = None

    @classmethod
    def parse(cls, raw: Union[List, Dict], **fields) -> "Machine":
        if isinstance(raw, list):
            sequence = super().parse_list(raw, **fields)
            if isinstance(sequence, Parallel):
                # Wrap a top-level Parallel in a single-state machine.
                return cls(start_at=sequence.name, states={sequence.name: sequence}, **fields)
            assert isinstance(sequence, Machine)
            return sequence
        elif isinstance(raw, dict):
            # Proper state machine definition
            return Sequence.parse(raw, type=States.Machine, **fields)
        else:
            raise TypeError(raw)

    # NOTE(review): to_json is truncated at the end of this chunk (the
    # docstring below is cut off mid-way; the body is not visible here).
    def to_json(self, json_options=None, state_visitor: Callable[[State, Dict], None]=None):
        """
        Generate a JSON that can be used as a State Machine definition.
        If you need to customise the generated output, pass state_visitor
        which will be called for every compiled state dictionary.
#!/usr/bin/python ''' Shader assembler. Usage: asm.py --isa-file ../rnndb/isa.xml in.asm out.bin ''' # Copyright (c) 2012-2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sub license, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the # next paragraph) shall be included in all copies or substantial portions # of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
# etnaviv shader assembler core: parses textual shader assembly and packs it
# into 128-bit Vivante GC instruction words using the rnndb ISA description.
# NOTE(review): `unicode` below still requires a Python 2 interpreter (or a
# compatibility alias) at runtime; the py3-fatal `except E,e` syntax and the
# py2-only `xrange`/`iterkeys` calls have been replaced with forms that behave
# identically on Python 2.6+.
from __future__ import print_function, division, unicode_literals
import argparse,struct
import sys
from binascii import b2a_hex
import re

from etnaviv.util import rnndb_path
from etnaviv.parse_rng import parse_rng_file, format_path, BitSet, Domain
from etnaviv.asm_common import DstOperand, DstOperandAReg, DstOperandMem, SrcOperand, SrcOperandImm, TexOperand, AddrOperand, Instruction, AMODES, COMPS, RGROUPS, set_imm
from etnaviv.asm_common import disassemble, format_instruction
from etnaviv.disasm import disasm_format
from etnaviv.asm_defs import Model, Flags, Dialect

# Operand syntax, e.g. "t3[a.x].xyzw" (register), "mem.xy", "label", "123".
reg_re = re.compile('^(i|t|u|a|tex|\?4\?|\?5\?|\?6\?|\?7\?)(\d+)(\[.*?\])?(\.[\_xyzw]{1,4})?$')
mem_re = re.compile('^mem(\.[\_xyzw]{1,4})?$')
label_re = re.compile('^[a-zA-Z\-\_][0-9a-zA-Z\-\_]*$')
int_re = re.compile('^[0-9]+$')


def parse_amode(amode):
    '''Parse a bracketed addressing-mode suffix ("[a.x]") to an AMODES index.

    An empty/absent suffix means direct addressing (0). Raises ValueError
    (via list.index) for an unknown mode; callers catch LookupError.
    '''
    if not amode:
        return 0
    return AMODES.index(amode[1:-1])  # strip the surrounding [ ]


def parse_comps(comps):
    '''Parse a destination write-mask suffix (".xyzw") to a 4-bit mask.

    No suffix enables all four components (0b1111).
    '''
    if not comps:
        return 15
    return ((('x' in comps)<<0)|(('y' in comps)<<1)|(('z' in comps)<<2)|(('w' in comps)<<3))


def parse_swiz(swiz):
    '''Parse a source swizzle suffix (".xyzw") to its 8-bit encoding.

    No suffix yields the identity swizzle 0xe4 (x,y,z,w). When fewer than
    four components are given, the last one is repeated for the remaining
    slots (comp keeps its previous value in the loop below).
    '''
    if not swiz:
        return 0xe4
    swiz = swiz[1:] # drop .
    rv = 0
    for idx in range(4):  # was xrange: identical for this small fixed range
        if idx < len(swiz):
            comp = COMPS.index(swiz[idx])
        rv |= comp << (idx * 2)
    return rv


def parse_imm(s):
    '''Parse immediate.
    This accepts various integer formats (decimal, 0xhex, ... as accepted by
    int(s,0) ) or float. Raises ValueError if s is neither.
    '''
    try:
        return int(s,0)
    except ValueError:
        return float(s)


def is_imm(s):
    ''' Return True if s could be parsed as immediate, False otherwise.
    '''
    try:
        parse_imm(s)
    except ValueError:
        return False
    else:
        return True


def assemble(isa, dialect, inst, warnings):
    '''Pack one parsed Instruction into four 32-bit words.

    Builds a name->value field dict from the instruction's operands, then
    distributes those values over the VIV_ISA bitfields word by word.
    Problems are appended to the caller-supplied `warnings` list.
    '''
    fields = {}
    fields['OPCODE'] = inst.op & 0x3F
    fields['OPCODE_BIT6'] = (inst.op >> 6) & 0x01
    fields['COND'] = inst.cond
    fields['SAT'] = inst.sat
    fields['TYPE_BIT2'] = inst.type >> 2
    fields['TYPE_BIT01'] = inst.type & 3
    if isinstance(inst.dst, DstOperandAReg):
        # XXX validate that this instruction accepts
        # address destination arguments
        fields['DST_REG'] = inst.dst.reg
        fields['DST_COMPS'] = inst.dst.comps
    elif isinstance(inst.dst, DstOperandMem):
        fields['DST_COMPS'] = inst.dst.comps
    elif isinstance(inst.dst, DstOperand):
        fields['DST_USE'] = inst.dst.use
        fields['DST_AMODE'] = inst.dst.amode
        fields['DST_REG'] = inst.dst.reg
        fields['DST_COMPS'] = inst.dst.comps
    elif inst.dst is None:
        fields['DST_USE'] = 0
    else:
        warnings.append('Invalid destination argument')
    if inst.tex is not None:
        fields['TEX_ID'] = inst.tex.id
        fields['TEX_AMODE'] = inst.tex.amode
        fields['TEX_SWIZ'] = inst.tex.swiz
    if inst.addr is not None:
        fields['SRC2_IMM'] = inst.addr.addr
    for (idx, src) in enumerate(inst.src):
        if isinstance(src, SrcOperand):
            fields['SRC%i_USE' % idx] = src.use
            fields['SRC%i_REG' % idx] = src.reg
            fields['SRC%i_SWIZ' % idx] = src.swiz
            fields['SRC%i_NEG' % idx] = src.neg
            fields['SRC%i_ABS' % idx] = src.abs
            fields['SRC%i_AMODE' % idx] = src.amode
            fields['SRC%i_RGROUP' % idx] = src.rgroup
        elif isinstance(src, SrcOperandImm):
            fields['SRC%i_USE' % idx] = src.use
            set_imm(fields, idx, src.imm)
    # XXX check for colliding fields
    domain = isa.lookup_domain('VIV_ISA')
    rv = [0,0,0,0]
    for word in [0,1,2,3]:
        bitset = domain.lookup_address(word*4)[-1][0].type
        for field in bitset.bitfields:
            if field.name in fields:
                try:
                    rv[word] |= field.fill(fields[field.name])
                    del fields[field.name]
                except ValueError as e:  # was py2-only "except ValueError,e"
                    warnings.append(str(e))
    for field in fields:  # was fields.iterkeys(): same iteration, py3-safe
        # warn if fields are not used, that's probably a typo
        warnings.append('Field %s not used' % field)
    return rv


class Assembler(object):
    '''
    Instruction assembler context.

    Feed source lines to parse(), then call generate_code() to obtain the
    packed instruction words. Errors accumulate in self.errors as
    (line number, message) tuples.
    '''
    labels = {}
    linenr = 0
    instructions = None
    source = None

    def __init__(self, isa, dialect):
        self.isa = isa
        self.dialect = dialect
        self.errors = []
        self.instructions = []
        self.source = []
        # Fix: labels used to be only the shared class-level dict above, so
        # labels leaked between Assembler instances; reset per instance like
        # instructions/source.
        self.labels = {}

    def parse(self, line):
        '''Parse one source line into an Instruction (or None).

        Handles comments (";"), optional "label:" prefixes, the mnemonic
        with its ".COND/.TYPE/.SAT" atoms, and the comma-separated operands.
        Returns the parsed Instruction, or None for blank/unparseable lines
        (errors are recorded in self.errors).
        '''
        # remove comment
        self.source.append(line)
        self.linenr += 1
        (line, _, _) = line.partition(';') # drop comments
        (label, _, line) = line.rpartition(':') # handle optional labels
        if label:
            label = label.strip()
            # Numeric labels are generated by the disassembler as line guides.
            # Check that these appear in the right line during re-assembly. If
            # not, this is only a source of bugs.
            if int_re.match(label):
                if len(self.instructions) != int(label):
                    self.errors.append((self.linenr, 'Misplaced instruction number label: %s' % label))
            else:
                if not label_re.match(label):
                    self.errors.append((self.linenr, 'Invalid label: %s' % label))
                self.labels[label] = len(self.instructions)
        line = line.strip()
        if not line: # empty line
            return None
        (inst, _, operands) = line.partition(' ')
        m = re.match('\s*([a-zA-Z0-9\.]+)\s*(.*?)\s*$', line)
        if not m:
            self.errors.append((self.linenr, 'Cannot parse line: %s' % line))
            return None
        inst = m.group(1)
        operands = m.group(2)
        # uppercase, split into atoms
        inst = inst.upper().split('.')
        try:
            op = self.isa.types['INST_OPCODE'].values_by_name[inst[0]].value
        except KeyError:
            if inst[0].startswith('0X'): # hexdecimal (unknown) op
                op = int(inst[0][2:], 16)
            else:
                self.errors.append((self.linenr, 'Unknown instruction %s' % inst[0]))
                return None
        cond = 0
        sat = False
        conditions = self.isa.types['INST_CONDITION'].values_by_name
        types = self.isa.types['INST_TYPE'].values_by_name
        type_ = 0
        for atom in inst[1:]:
            if atom in conditions:
                cond = conditions[atom].value
            elif atom in types:
                type_ = types[atom].value
            elif atom == 'SAT':
                sat = True
            else:
                self.errors.append((self.linenr, 'Unknown atom %s' % atom))
                return None
        operands = operands.split(',')
        src = []
        dst = None
        tex = None
        addr = None
        for idx,operand in enumerate(operands):
            operand = operand.strip()
            neg = False
            abs = False
            if operand.startswith('-'):  # negate modifier
                neg = True
                operand = operand[1:]
            if operand.startswith('|'):  # |x| absolute-value modifier
                if not operand.endswith('|'):
                    self.errors.append((self.linenr, 'Unterminated |'))
                abs = True
                operand = operand[1:-1]
            # check kind of operand
            # (t|u|a)XXX[.xyzw] (address)register
            match_reg = reg_re.match(operand)
            match_mem = mem_re.match(operand)
            if match_reg:
                (regtype, regid, amode, swiz) = match_reg.groups()
                regid = int(regid)
                try:
                    amode = parse_amode(amode)
                except LookupError:
                    self.errors.append((self.linenr, 'Unknown amode %s' % amode))
                    amode = 0
                if idx == 0: # destination operand
                    comps = parse_comps(swiz)
                    if regtype == 't':
                        dst = DstOperand(use=1, amode=amode, reg=regid, comps=comps)
                    elif regtype == 'a':
                        dst = DstOperandAReg(reg=regid, comps=comps)
                    else:
                        self.errors.append((self.linenr, 'Cannot have texture or uniform as destination argument'))
                else: # source operand
                    try:
                        swiz = parse_swiz(swiz)
                    except LookupError:
                        self.errors.append((self.linenr, 'Unparseable swizzle %s' % swiz))
                        swiz = 0
                    if regtype in RGROUPS: # register group
                        if regtype == 'u':
                            # uniforms are split over two rgroups of 128 each
                            if regid < 128:
                                rgroup = 2
                            else:
                                rgroup = 3
                                regid -= 128
                        else:
                            rgroup = RGROUPS.index(regtype)
                        src.append(SrcOperand(use=1, reg=regid, swiz=swiz, neg=neg, abs=abs, amode=amode, rgroup=rgroup))
                    elif regtype == 'a':
                        src.append(DstOperandAReg(reg=regid, comps=comps))
                    elif regtype == 'tex':
                        tex = TexOperand(id=regid, amode=amode, swiz=swiz)
                    else:
                        self.errors.append((self.linenr, 'Unparseable register type %s' % regtype))
            elif match_mem:
                if idx == 0: # destination operand
                    comps = parse_comps(match_mem.group(1))
                    dst = DstOperandMem(comps=comps)
                else:
                    self.errors.append((self.linenr, 'Cannot have mem as source argument'))
            elif operand == 'void':
                #print('void')
                if idx == 0: # destination operand
                    dst = None
                else:
                    src.append(None)
            elif label_re.match(operand):
                # label (interpreted as immediate on gc3000+, as branch destination on gc2000)
                if self.dialect.model <= Model.GC2000:
                    if idx == 3: # last operand (proxy for "is branch destination?")
                        addr = AddrOperand(addr = operand) # will resolve labels later
                    else:
                        src.append(None)
                        self.errors.append((self.linenr, 'Immediates not supported on GC2000 except for branch destination'))
                else:
                    src.append(SrcOperandImm(use=1, imm=operand)) # will resolve labels later
            elif is_imm(operand):
                # immediate or direct address
                if self.dialect.model <= Model.GC2000:
                    if idx == 3: # last operand
                        addr = AddrOperand(addr = int(operand))
                    else:
                        src.append(None)
                        self.errors.append((self.linenr, 'Immediates not supported on GC2000 except for branch destination'))
                else:
                    src.append(SrcOperandImm(use=1, imm=parse_imm(operand)))
            else:
                self.errors.append((self.linenr, 'Unparseable operand ' + operand))
        # Every instruction encodes exactly one destination, up to three
        # sources and possibly a branch target: 4 operand slots in total.
        num_operands = 1 + len(src) + (addr is not None)
        if num_operands != 4:
            self.errors.append((self.linenr, 'Invalid number of operands (%i)' % num_operands))
        # TODO: sel
        sel = None
        inst_out = Instruction(op=op, cond=cond,sat=sat,type=type_,
                tex=tex,dst=dst,src=src,addr=addr,sel=None,unknowns={},linenr=self.linenr)
        self.instructions.append(inst_out)
        return inst_out

    def generate_code(self):
        '''Resolve label references and assemble all parsed instructions.

        Returns the list of 4-word instruction encodings; label lookup
        failures are recorded in self.errors.
        '''
        rv = []
        for inst in self.instructions:
            warnings = []
            # fill in labels in addr operand
            # NOTE(review): `unicode` is a Python 2 name; this check keeps the
            # original py2 semantics.
            if inst.addr is not None and isinstance(inst.addr.addr, (str,unicode)):
                try:
                    addr = AddrOperand(self.labels[inst.addr.addr])
                except LookupError:
                    self.errors.append((inst.linenr, 'Unknown label ' + inst.addr.addr))
                    addr = AddrOperand(0) # dummy
                inst = inst._replace(addr=addr)
            # fill in labels in other operands
            for i,src in enumerate(inst.src):
                if isinstance(src, SrcOperandImm) and isinstance(src.imm, (str,unicode)):
                    inst.src[i] = src._replace(imm=self.labels[src.imm])
            inst_out = assemble(self.isa, self.dialect, inst, warnings)
            rv.append(inst_out)
= top_level_aggregation.order(self.sort_column, order)
        # Secondary sort on the term itself keeps ordering stable across
        # users that tie on the primary sort column.
        top_level_aggregation = top_level_aggregation.order("_term", order, reset=False)

        query = (
            case_es.CaseES()
            .domain(self.domain)
            .user_ids_handle_unknown(user_ids)
            .size(0)
        )
        if self.case_type:
            query = query.case_type(self.case_type)
        else:
            # No explicit case type selected: include everything except the
            # synthetic 'commcare-user' case type.
            query = query.filter(filters.NOT(case_es.case_type('commcare-user')))

        query = query.aggregation(top_level_aggregation)

        if self.missing_users:
            # Bucket for cases with no user_id at all, with the same
            # touched/active/inactive sub-aggregations as regular users.
            missing_aggregation = (
                MissingAggregation('missing_users', 'user_id')
                .aggregation(self._touched_total_aggregation)
                .aggregation(self._active_total_aggregation)
                .aggregation(self._inactive_total_aggregation)
            )
            missing_aggregation = self.add_landmark_aggregations(missing_aggregation, self.end_date)
            query = query.aggregation(missing_aggregation)

        return query.run()

    def add_landmark_aggregations(self, aggregation, end_date):
        # Attach one filter aggregation per configured landmark period.
        for key, landmark in self.landmarks:
            aggregation = aggregation.aggregation(self.landmark_aggregation(key, landmark, end_date))
        return aggregation

    def landmark_aggregation(self, key, landmark, end_date):
        # Cases modified within [now - landmark, end_date), split into
        # still-open ('active') and closed ('closed') sub-buckets.
        start_date = ServerTime(self.utc_now - landmark).phone_time(self.timezone).done()
        return (
            FilterAggregation(key,
                filters.AND(
                    case_es.modified_range(gte=start_date, lt=end_date),
                )
            )
            .aggregation(FilterAggregation('active', case_es.is_closed(False)))
            .aggregation(FilterAggregation('closed', case_es.is_closed()))
        )


class Row(object):
    """One report row for a single user.

    `bucket` is the user's ES aggregation bucket (presumably a terms bucket
    keyed by user_id — falsy when the user has no matching cases), and every
    count method below returns 0 in that case.
    """
    def __init__(self, report, user, bucket):
        self.report = report
        self.user = user
        self.bucket = bucket

    def active_count(self, landmark_key):
        # Open cases modified during the landmark period.
        if not self.bucket:
            return 0
        else:
            landmark = self.bucket.result.get(landmark_key, None)
            if landmark:
                return landmark['active']['doc_count']
            return 0

    def modified_count(self, landmark_key):
        # All cases modified during the landmark period.
        if not self.bucket:
            return 0
        else:
            landmark = self.bucket.result.get(landmark_key, None)
            if landmark:
                return landmark['doc_count']
            return 0

    def closed_count(self, landmark_key):
        # Cases closed as of the landmark period.
        if not self.bucket:
            return 0
        else:
            landmark = self.bucket.result.get(landmark_key, None)
            if landmark:
                return landmark['closed']['doc_count']
            return 0

    def total_touched_count(self):
        if not self.bucket:
            return 0
        else:
            landmark = self.bucket.result.get('touched_total', None)
            if landmark:
                return landmark['doc_count']
            return 0

    def total_inactive_count(self):
        if not self.bucket:
            return 0
        else:
            landmark = self.bucket.result.get('inactive_total', None)
            if landmark:
                return landmark['doc_count']
            return 0

    def total_active_count(self):
        if not self.bucket:
            return 0
        else:
            landmark = self.bucket.result.get('active_total', None)
            if landmark:
                return landmark['doc_count']
            return 0

    def header(self):
        return self.report.get_user_link(self.user)['html']


class TotalRow(object):
    """Aggregate totals row, read straight from the top-level ES aggregations
    rather than from a per-user bucket."""
    def __init__(self, es_results, header):
        self._header = header
        self.total_touched_bucket = es_results.aggregations.touched_total
        self.total_active_bucket = es_results.aggregations.active_total
        self.total_inactive_bucket = es_results.aggregations.inactive_total
        self.aggregations = es_results.aggregations

    def total_touched_count(self):
        return self.total_touched_bucket.doc_count

    def total_inactive_count(self):
        return self.total_inactive_bucket.doc_count

    def total_active_count(self):
        return self.total_active_bucket.doc_count

    def active_count(self, landmark_key):
        return getattr(self.aggregations, landmark_key).result['active']['doc_count']

    def modified_count(self, landmark_key):
        return getattr(self.aggregations, landmark_key).doc_count

    def closed_count(self, landmark_key):
        return getattr(self.aggregations, landmark_key).result['closed']['doc_count']

    def header(self):
        return self._header


@location_safe
class SubmissionsByFormReport(WorkerMonitoringFormReportTableBase,
                              MultiFormDrilldownMixin, DatespanMixin,
                              CompletionOrSubmissionTimeMixin):
    # One column per selected form, one row per selected mobile worker.
    name = ugettext_noop("Submissions By Form")
    slug = "submissions_by_form"
    fields = [
        'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
        'corehq.apps.reports.filters.forms.FormsByApplicationFilter',
        'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
        'corehq.apps.reports.filters.dates.DatespanFilter'
    ]
    fix_left_col = True
    emailable = True
    is_cacheable = True
    description = ugettext_noop("Number of submissions by form.")

    @classmethod
    def display_in_dropdown(cls, domain=None, project=None, user=None):
        # Hidden for CommTrack-enabled projects.
        if project and project.commtrack_enabled:
            return False
        else:
            return True

    @property
    def headers(self):
        headers = DataTablesHeader(DataTablesColumn(_("User"), span=3))
        if not self.all_relevant_forms:
            headers.add_column(
                DataTablesColumn(
                    _("No submissions were found for selected forms "
                      "within this date range."),
                    sortable=False
                )
            )
        else:
            for _form, info in self.all_relevant_forms.items():
                help_text = None
                if info['is_fuzzy']:
                    help_text = _("This column shows Fuzzy Submissions.")
                headers.add_column(
                    DataTablesColumn(
                        info['name'],
                        sort_type=DTSortType.NUMERIC,
                        help_text=help_text,
                    )
                )
            headers.add_column(
                DataTablesColumn(_("All Forms"), sort_type=DTSortType.NUMERIC)
            )
        return headers

    @property
    @memoized
    def selected_simplified_users(self):
        return _get_selected_users(self.domain, self.request)

    @property
    def rows(self):
        # Interactive views are capped by USER_QUERY_LIMIT; email/export runs
        # are allowed to be big.
        export = self.rendered_as in ('email', 'export')
        if util.is_query_too_big(
            self.domain, self.request.GET.getlist(EMWF.slug), self.request.couch_user,
        ) and not export:
            raise BadRequestError(
                _('Query selects too many users. Please modify your filters to select fewer than {} users').format(
                    USER_QUERY_LIMIT,
                )
            )

        selected_users = self.selected_simplified_users
        track_es_report_load(self.domain, self.slug, len(self.selected_simplified_users))

        totals = [0] * (len(self.all_relevant_forms) + 1)
        for simplified_user in selected_users:
            row = []
            if self.all_relevant_forms:
                for form in self.all_relevant_forms.values():
                    row.append(self._form_counts[
                        (simplified_user.user_id, form['app_id'], form['xmlns'])
                    ])
                row_sum = sum(row)
                row = (
                    [self.get_user_link(simplified_user)] +
                    [self.table_cell(row_data, zerostyle=True) for row_data in row] +
                    [self.table_cell(row_sum, format_html("<strong>{}</strong>", row_sum))]
                )
                # Accumulate per-column totals from the cells' sort keys.
                totals = [totals[i] + col.get('sort_key')
                          for i, col in enumerate(row[1:])]
                yield row
            else:
                yield [self.get_user_link(simplified_user), '--']
        if self.all_relevant_forms:
            self.total_row = [_("All Users")] + totals
            yield self.total_row

    @property
    @memoized
    def _form_counts(self):
        # dict keyed by (user_id, app_id, xmlns) -> submission count.
        mobile_user_and_group_slugs = self.request.GET.getlist(EMWF.slug)
        if (EMWF.show_all_mobile_workers(mobile_user_and_group_slugs)
                and self.request.can_access_all_locations):
            user_ids = []
        else:
            user_ids = [simplified_user.user_id for simplified_user in self.selected_simplified_users]
        return get_form_counts_by_user_xmlns(
            domain=self.domain,
            startdate=self.datespan.startdate_utc.replace(tzinfo=pytz.UTC),
            enddate=self.datespan.enddate_utc.replace(tzinfo=pytz.UTC),
            user_ids=user_ids,
            xmlnss=[f['xmlns'] for f in self.all_relevant_forms.values()],
            by_submission_time=self.by_submission_time,
            export=self.rendered_as == 'export'
        )


@location_safe
class DailyFormStatsReport(WorkerMonitoringReportTableBase, CompletionOrSubmissionTimeMixin, DatespanMixin):
    # Per-user daily submission counts over the selected date span.
    slug = "daily_form_stats"
    name = ugettext_lazy("Daily Form Activity")
    bad_request_error_text = ugettext_lazy(
        "Your search query was invalid. If you're using a large date range, try using a smaller one.")

    fields = [
        'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
        'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
        'corehq.apps.reports.filters.dates.DatespanFilter',
    ]

    description = ugettext_lazy("Number of submissions per day.")
    fix_left_col = False
    emailable = True
    is_cacheable = False
    ajax_pagination = True
    exportable_all = True
    datespan_max_days = 90

    @classmethod
    def display_in_dropdown(cls, domain=None, project=None, user=None):
        if project and project.commtrack_enabled:
            return False
        else:
            return True

    @property
    @memoized
    def dates(self):
        # Every calendar day from startdate through enddate, inclusive.
        date_list = [self.datespan.startdate]
        while date_list[-1] < self.datespan.enddate:
            date_list.append(date_list[-1] + datetime.timedelta(days=1))
        return date_list

    @property
    def headers(self):
        if self.datespan.is_valid():
            headers = DataTablesHeader(DataTablesColumn(_("Username"), span=3))
            for d in self.dates:
                headers.add_column(DataTablesColumn(json_format_date(d), sort_type=DTSortType.NUMERIC))
            headers.add_column(DataTablesColumn(_("Total"), sort_type=DTSortType.NUMERIC))
            return headers
        else:
            return DataTablesHeader(DataTablesColumn(_("Error")))

    @property
    def date_field(self):
        # 'received_on' = server submission time, 'time_end' = phone
        # completion time, per the CompletionOrSubmissionTime filter.
        return 'received_on' if self.by_submission_time else 'time_end'

    @property
    def startdate(self):
        return self.datespan.startdate_utc if self.by_submission_time else self.datespan.startdate

    @property
    def enddate(self):
        return self.datespan.enddate_utc if self.by_submission_time else self.datespan.enddate_adjusted

    @property
    def is_submission_time(self):
        return self.date_field == 'received_on'

    def date_filter(self, start, end):
        return {'%s__range' % self.date_field: (start, end)}

    @property
    def shared_pagination_GET_params(self):
        # Filter state to carry through ajax pagination requests.
        params = [
            dict(
                name=EMWF.slug,
                value=EMWF.get_value(self.request, self.domain)),
            dict(
                name=CompletionOrSubmissionTimeFilter.slug,
                value=CompletionOrSubmissionTimeFilter.get_value(self.request, self.domain)),
            dict(name='startdate', value=self.datespan.startdate_display),
            dict(name='enddate', value=self.datespan.enddate_display),
        ]
        return params

    @property
    def total_records(self):
        return len(self.selected_users)

    @property
    @memoized
    def selected_users(self):
        return _get_selected_users(self.domain, self.request)

    def paginate_list(self, data_list):
        # Slice the list to the current datatables page, if paginating.
        if self.pagination:
            start = self.pagination.start
            end = start + self.pagination.count
            return data_list[start:end]
        else:
            return data_list

    def users_by_username(self, order):
        users = self.selected_users
        # NOTE(review): reverse() mutates the memoized selected_users list in
        # place — verify repeated sorts within one request behave as intended.
        if order == "desc":
            users.reverse()
        return self.paginate_list(users)

    def users_by_range(self, datespan, order):
        # Sort users by their form count within the given datespan.
        if self.is_submission_time:
            get_counts_by_user = get_submission_counts_by_user
        else:
            get_counts_by_user = get_completed_counts_by_user

        if EMWF.show_all_mobile_workers(self.request.GET.getlist(EMWF.slug)):
            user_ids = None  # Don't restrict query by user ID
        else:
            user_ids = [u.user_id for u in self.selected_users]

        results = get_counts_by_user(self.domain, datespan, user_ids)

        return self.users_sorted_by_count(results, order)

    def users_sorted_by_count(self, count_dict, order):
        # Split selected_users into those in count_dict and those not.
        # Sort the former by count and return
        users_with_forms = []
        users_without_forms = []
        for user in self.selected_users:
            u_id = user['user_id']
            if u_id in count_dict:
                users_with_forms.append((count_dict[u_id], user))
            else:
                users_without_forms.append(user)
        if order == "asc":
            # Zero-count users sort first ascending, last descending.
            users_with_forms.sort()
            sorted_users = users_without_forms
            sorted_users += [u[1] for u in users_with_forms]
        else:
            users_with_forms.sort(reverse=True)
            sorted_users = [u[1] for u in users_with_forms]
            sorted_users += users_without_forms

        return self.paginate_list(sorted_users)

    @property
    def column_count(self):
        # Username + one column per day + Total.
        return len(self.dates) + 2

    @property
    def rows(self):
        if not self.datespan.is_valid():
            return [[self.datespan.get_validation_reason()]]

        self.sort_col = self.request_params.get('iSortCol_0', 0)
        totals_col = self.column_count - 1
        order = self.request_params.get('sSortDir_0')

        # Pick the user ordering matching the column the client sorted on:
        # totals column, a single day column, or the username column.
        if self.sort_col == totals_col:
            users = self.users_by_range(self.datespan, order)
        elif 0 < self.sort_col < totals_col:
            start = self.dates[self.sort_col - 1]
            end = start + datetime.timedelta(days=1)
            users = self.users_by_range(DateSpan(start, end), order)
        else:
            users = self.users_by_username(order)

        track_es_report_load(self.domain, self.slug, len(users))

        # TODO: this hits ES separately for each user;
        # should instead aggregate by user in one ES query
        rows = [self.get_row(user) for user in users]
        self.total_row = self.get_row()
        return rows

    @property
    def get_all_rows(self):
        rows = [self.get_row(user) for user in self.selected_users]
        self.total_row = self.get_row()
        return rows

    def get_row(self, user=None):
        """
        Assemble a row for a given user.
        If no user is passed, assemble a totals row.
        """
        if user:
            user_ids = [user.user_id]
        else:
            user_ids = [u.user_id for u in self.selected_users]

        if self.is_submission_time:
            get_counts_by_date = get_submission_counts_by_date
        else:
            get_counts_by_date = get_completed_counts_by_date

        results = get_counts_by_date(
            self.domain,
            user_ids,
            self.datespan,
            self.timezone,
        )

        date_cols = [
            results.get(json_format_date(date), 0)
            for date in self.dates
        ]
        styled_zero = mark_safe('<span class="text-muted">0</span>')  # nosec: no user input
        styled_date_cols = [styled_zero if c == 0 else c for c in date_cols]
        first_col = self.get_raw_user_link(user) if user else _("Total")
        return [first_col] + styled_date_cols + [sum(date_cols)]

    def get_raw_user_link(self, user):
        # Imported here to avoid a circular import at module load time
        # (presumably — confirm against corehq.apps.reports.standard.inspect).
        from corehq.apps.reports.standard.inspect import SubmitHistory
        return _get_raw_user_link(user, SubmitHistory.get_url(domain=self.domain),
                                  filter_class=EMWF)

    @property
    def template_context(self):
        context = super(DailyFormStatsReport, self).template_context
        context.update({
            'hide_lastyear': True,
        })
        return context


@location_safe
class FormCompletionTimeReport(WorkerMonitoringFormReportTableBase, DatespanMixin,
                               CompletionOrSubmissionTimeMixin):
    # Duration statistics (avg/stddev/min/max) for one selected form.
    name = ugettext_lazy("Form Completion Time")
    slug = "completion_times"
    fields = ['corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
              'corehq.apps.reports.filters.forms.SingleFormByApplicationFilter',
              'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
              'corehq.apps.reports.filters.dates.DatespanFilter']
    description = ugettext_lazy("Statistics on time spent on a particular form.")
    is_cacheable = True

    @property
    @memoized
    def selected_form_data(self):
        # Returns the single selected form dict, or None when the selection
        # is ambiguous (implicit None return).
        forms = list(FormsByApplicationFilter.get_value(self.request, self.domain).values())
        if len(forms) == 1 and forms[0]['xmlns']:
            return forms[0]
        non_fuzzy_forms = [form for form in forms if not form['is_fuzzy']]
        if len(non_fuzzy_forms) == 1:
            return non_fuzzy_forms[0]

    @property
    def headers(self):
        if not self.selected_form_data:
            return DataTablesHeader(DataTablesColumn(_("No Form Selected"), sortable=False))
        return DataTablesHeader(DataTablesColumn(_("User")),
                                DataTablesColumn(_("Average"), sort_type=DTSortType.NUMERIC),
                                DataTablesColumn(_("Std. Dev."), sort_type=DTSortType.NUMERIC),
                                DataTablesColumn(_("Shortest"), sort_type=DTSortType.NUMERIC),
                                DataTablesColumn(_("Longest"), sort_type=DTSortType.NUMERIC),
                                DataTablesColumn(_("No. of Forms"), sort_type=DTSortType.NUMERIC))

    @property
    def rows(self):
        rows = []
        if not self.selected_form_data:
            rows.append([_("You must select a specific form to view data.")])
            return rows

        def to_duration(val_in_s):
            # NOTE(review): despite the parameter name, the value is passed to
            # timedelta as milliseconds — confirm the upstream unit.
            assert val_in_s is not None
            return datetime.timedelta(milliseconds=val_in_s)

        def to_minutes(val_in_s):
            if val_in_s is None:
                return "--"
            return friendly_timedelta(to_duration(val_in_s))

        def to_minutes_raw(val_in_s):
            """
            return a timestamp like 66:12:24 (the first number is hours
            """
            if val_in_s is None:
                return '--'
            td = to_duration(val_in_s)
            hours, remainder = divmod(td.seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            return '{h}:{m}:{s}'.format(
                h=(td.days * 24) + hours,
                m=minutes,
                s=seconds,
            )

        def _fmt_ts(timestamp):
supply pre-extracted NetCDF variables to the computational routines. It is primarily used for internal purposes, but can also be used to improve performance by eliminating the need to repeatedly extract the same variables used in multiple diagnostics calculations, particularly when using large sequences of files. Default is None. **kwargs: Keyword arguments for creating a :class:`matplotlib.mpl_toolkits.basemap.Basemap`. By default, the domain bounds will be set to the native projection, the resolution will be set to 'l', and the other projection parameters will be set by the information in the file. Returns: :class:`cartopy.crs.Projection`: A Projection subclass for the map projection. Returns: :class:`matplotlib.mpl_toolkits.basemap.Basemap`: A Basemap object for the projection. See Also: :class:`matplotlib.mpl_toolkits.basemap.Basemap` """ return _get_proj_obj("basemap", var, wrfin, varname, timeidx, method, squeeze, cache, **kwargs) def get_pyngl(var=None, wrfin=None, varname=None, timeidx=0, method="cat", squeeze=True, cache=None, **kwargs): """Return a :class:`Ngl.Resources` object for the map projection. Args: var (:class:`xarray.DataArray`, optional): A :class:`xarray.DataArray` variable that includes latitude,longitude coordinate information. If not used, then *wrfin* must be provided. geobounds (:class:`wrf.GeoBounds`, optional): The geobounds to get the extents. If set to None and using the *var* parameter, the geobounds will be taken from the variable. If using a file, then the geobounds will be taken from the native grid. wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \ iterable, optional): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile` or an iterable sequence of the aforementioned types. If not used, then *var* must be provided. varname (:obj:`str`, optional): If using *wrfin*, then this will be the variable name to use to determine the geobounds. 
The variable can be a coordinate variable, or a regular variable that contains coordinate attributes. If None, then the 'XLAT', 'XLAT_M', 'XLONG', 'XLONG_M' variables will be used. timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The desired time index. This value can be a positive integer, negative integer, or :data:`wrf.ALL_TIMES` (an alias for None) to return all times in the file or sequence. Default is 0. method (:obj:`str`, optional): The aggregation method to use for sequences. Must be either 'cat' or 'join'. 'cat' combines the data along the Time dimension. 'join' creates a new dimension for the file index. The default is 'cat'. squeeze (:obj:`bool`, optional): Set to False to prevent dimensions with a size of 1 from being automatically removed from the shape of the output. Default is True. cache (:obj:`dict`, optional): A dictionary of (varname, ndarray) that can be used to supply pre-extracted NetCDF variables to the computational routines. It is primarily used for internal purposes, but can also be used to improve performance by eliminating the need to repeatedly extract the same variables used in multiple diagnostics calculations, particularly when using large sequences of files. Default is None. **kwargs: Additional PyNGL resources to set while creating the :class:`Ngl.Resources` object. Returns: :class:`Ngl.Resources`: A dict-like object that contains the PyNGL resources for the map projection. See Also: `PyNGL <https://www.pyngl.ucar.edu/>`_ """ return _get_proj_obj("pyngl", var, wrfin, varname, timeidx, method, squeeze, cache) def cartopy_xlim(var=None, geobounds=None, wrfin=None, varname=None, timeidx=0, method="cat", squeeze=True, cache=None): """Return the x-axis limits in the projected coordinates. For some map projections, like :class`wrf.RotatedLatLon`, the :meth:`cartopy.GeoAxes.set_extent` method does not work correctly. This method is equivalent to: .. 
code-block:: python pc = crs.PlateCarree() xs, ys, _ = self._cartopy().transform_points(pc, np.array([geobounds.bottom_left.lon, geobounds.top_right.lon]), np.array([geobounds.bottom_left.lat, geobounds.top_right.lat])).T _xlimits = xs.tolist() _ylimits = ys.tolist() return (_xlimits, _ylimits)[0] Args: var (:class:`xarray.DataArray`, optional): A :class:`xarray.DataArray` variable that includes latitude,longitude coordinate information. If not used, then *wrfin* must be provided. geobounds (:class:`wrf.GeoBounds`, optional): The geobounds to get the extents. If set to None and using the *var* parameter, the geobounds will be taken from the variable. If using a file, then the geobounds will be taken from the native grid. wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \ iterable, optional): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile` or an iterable sequence of the aforementioned types. If not used, then *var* must be provided. varname (:obj:`str`, optional): If using *wrfin*, then this will be the variable name to use to determine the geobounds. The variable can be a coordinate variable, or a regular variable that contains coordinate attributes. If None, then the 'XLAT', 'XLAT_M', 'XLONG', 'XLONG_M' variables will be used. timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The desired time index. This value can be a positive integer, negative integer, or :data:`wrf.ALL_TIMES` (an alias for None) to return all times in the file or sequence. Default is 0. method (:obj:`str`, optional): The aggregation method to use for sequences. Must be either 'cat' or 'join'. 'cat' combines the data along the Time dimension. 'join' creates a new dimension for the file index. The default is 'cat'. squeeze (:obj:`bool`, optional): Set to False to prevent dimensions with a size of 1 from being automatically removed from the shape of the output. Default is True. 
cache (:obj:`dict`, optional): A dictionary of (varname, ndarray) that can be used to supply pre-extracted NetCDF variables to the computational routines. It is primarily used for internal purposes, but can also be used to improve performance by eliminating the need to repeatedly extract the same variables used in multiple diagnostics calculations, particularly when using large sequences of files. Default is None. Returns: :obj:`list`: A list of [start_x, end_x] in the projected coordinate system. """ wrf_proj, native_geobnds = _get_wrf_proj_geobnds(var, wrfin, varname, timeidx, method, squeeze, cache) if geobounds is not None: return wrf_proj.cartopy_xlim(geobounds) return wrf_proj.cartopy_xlim(native_geobnds) def cartopy_ylim(var=None, geobounds=None, wrfin=None, varname=None, timeidx=0, method="cat", squeeze=True, cache=None): """Return the y-axis limits in the projected coordinates. For some map projections, like :class`wrf.RotatedLatLon`, the :meth:`cartopy.GeoAxes.set_extent` method does not work correctly. This method is equivalent to: .. code-block:: python pc = crs.PlateCarree() xs, ys, _ = self._cartopy().transform_points(pc, np.array([geobounds.bottom_left.lon, geobounds.top_right.lon]), np.array([geobounds.bottom_left.lat, geobounds.top_right.lat])).T _xlimits = xs.tolist() _ylimits = ys.tolist() return (_xlimits, _ylimits)[1] Args: var (:class:`xarray.DataArray`, optional): A :class:`xarray.DataArray` variable that includes latitude,longitude coordinate information. If not used, then *wrfin* must be provided. geobounds (:class:`wrf.GeoBounds`, optional): The geobounds to get the extents. If set to None and using the *var* parameter, the geobounds will be taken from the variable. If using a file, then the geobounds will be taken from the native grid. 
wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \ iterable, optional): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile` or an iterable sequence of the aforementioned types. If not used, then *var* must be provided. varname (:obj:`str`, optional): If using *wrfin*, then this will be the variable name to use to determine the geobounds. The variable can be a coordinate variable, or a regular variable that contains coordinate attributes. If None, then the 'XLAT', 'XLAT_M', 'XLONG', 'XLONG_M' variables will be used. timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The desired time index. This value can be a positive integer, negative integer, or :data:`wrf.ALL_TIMES` (an alias for None) to return all times in the file or sequence. Default is 0. method (:obj:`str`, optional): The aggregation method to use for sequences. Must be either 'cat' or 'join'. 'cat' combines the data along the Time dimension. 'join' creates a new dimension for the file index. The default is
# -*- coding: utf-8 -*- tosho_1 = [ ['1301','極洋','水産・農林'], ['1332','日本水産','水産・農林'], ['1333','マルハニチロ','水産・農林'], ['1352','ホウスイ','卸売'], ['1376','カネコ種苗','水産・農林'], ['1377','サカタのタネ','水産・農林'], ['1379','ホクト','水産・農林'], ['1384','ホクリヨウ','水産・農林'], ['1414','ショーボンドホールディングス','建設'], ['1417','ミライト・ホールディングス','建設'], ['1419','タマホーム','建設'], ['1420','サンヨーホームズ','建設'], ['1514','住石ホールディングス','鉱業'], ['1515','日鉄鉱業','鉱業'], ['1518','三井松島産業','鉱業'], ['1605','国際石油開発帝石','鉱業'], ['1606','日本海洋掘削','鉱業'], ['1662','石油資源開発','鉱業'], ['1663','K&Oエナジーグループ','鉱業'], ['1712','ダイセキ環境ソリューション','建設'], ['1719','安藤・間','建設'], ['1720','東急建設','建設'], ['1721','コムシスホールディングス','建設'], ['1722','ミサワホーム','建設'], ['1726','ビーアールホールディングス','建設'], ['1762','高松コンストラクショングループ','建設'], ['1766','東建コーポレーション','建設'], ['1768','ソネック','建設'], ['1773','ワイ・ティー・エル・コーポレーション・バーハッド','建設'], ['1780','ヤマウラ','建設'], ['1801','大成建設','建設'], ['1802','大林組','建設'], ['1803','清水建設','建設'], ['1805','飛島建設','建設'], ['1808','長谷工コーポレーション','建設'], ['1810','松井建設','建設'], ['1811','錢高組','建設'], ['1812','鹿島建設','建設'], ['1813','不動テトラ','建設'], ['1814','大末建設','建設'], ['1815','鉄建建設','建設'], ['1820','西松建設','建設'], ['1821','三井住友建設','建設'], ['1822','大豊建設','建設'], ['1824','前田建設工業','建設'], ['1826','佐田建設','建設'], ['1827','ナカノフドー建設','建設'], ['1833','奥村組','建設'], ['1835','東鉄工業','建設'], ['1847','イチケン','建設'], ['1852','淺沼組','建設'], ['1860','戸田建設','建設'], ['1861','熊谷組','建設'], ['1865','青木あすなろ建設','建設'], ['1866','北野建設','建設'], ['1867','植木組','建設'], ['1868','三井ホーム','建設'], ['1870','矢作建設工業','建設'], ['1871','ピーエス三菱','建設'], ['1873','日本ハウスホールディングス','建設'], ['1878','大東建託','建設'], ['1879','新日本建設','建設'], ['1881','NIPPO','建設'], ['1882','東亜道路工業','建設'], ['1883','前田道路','建設'], ['1884','日本道路','建設'], ['1885','東亜建設工業','建設'], ['1888','若築建設','建設'], ['1890','東洋建設','建設'], ['1893','五洋建設','建設'], ['1896','大林道路','建設'], ['1898','世紀東急工業','建設'], ['1899','福田組','建設'], ['1909','日本ドライケミカル','機械'], ['1911','住友林業','建設'], ['1914','日本基礎技術','建設'], ['1916','日成ビルド工業','建設'], ['1919','ヤマダ・エスバイエルホーム','建設'], ['1921','巴コーポレーション','建設'], ['1924','パナホーム','建設'], 
['1925','大和ハウス工業','建設'], ['1926','ライト工業','建設'], ['1928','積水ハウス','建設'], ['1929','日特建設','建設'], ['1930','北陸電気工事','建設'], ['1934','ユアテック','建設'], ['1937','西部電気工業','建設'], ['1939','四電工','建設'], ['1941','中電工','建設'], ['1942','関電工','建設'], ['1944','きんでん','建設'], ['1945','東京エネシス','建設'], ['1946','トーエネック','建設'], ['1949','住友電設','建設'], ['1950','日本電設工業','建設'], ['1951','協和エクシオ','建設'], ['1952','新日本空調','建設'], ['1954','日本工営','サービス'], ['1956','NDS','建設'], ['1959','九電工','建設'], ['1961','三機工業','建設'], ['1963','日揮','建設'], ['1964','中外炉工業','建設'], ['1967','ヤマト','建設'], ['1968','太平電業','建設'], ['1969','高砂熱学工業','建設'], ['1972','三晃金属工業','建設'], ['1973','NECネッツエスアイ','情報通信'], ['1975','朝日工業社','建設'], ['1976','明星工業','建設'], ['1979','大氣社','建設'], ['1980','ダイダン','建設'], ['1982','日比谷総合設備','建設'], ['1983','東芝プラントシステム','建設'], ['2001','日本製粉','食料品'], ['2002','日清製粉グループ本社','食料品'], ['2003','日東富士製粉','食料品'], ['2004','昭和産業','食料品'], ['2009','鳥越製粉','食料品'], ['2053','中部飼料','食料品'], ['2060','フィード・ワン','食料品'], ['2107','東洋精糖','食料品'], ['2108','日本甜菜製糖','食料品'], ['2109','三井製糖','食料品'], ['2112','塩水港精糖','食料品'], ['2117','日新製糖','食料品'], ['2120','ネクスト','サービス'], ['2124','ジェイエイシーリクルートメント','サービス'], ['2127','日本M&Aセンター','サービス'], ['2128','ノバレーゼ','サービス'], ['2131','アコーディア・ゴルフ','サービス'], ['2139','中広','サービス'], ['2151','タケエイ','サービス'], ['2154','トラスト・テック','サービス'], ['2168','パソナグループ','サービス'], ['2169','CDS','サービス'], ['2170','リンクアンドモチベーション','サービス'], ['2174','GCAサヴィアン','サービス'], ['2175','エス・エム・エス','サービス'], ['2181','テンプホールディングス','サービス'], ['2183','リニカル','サービス'], ['2193','クックパッド','サービス'], ['2196','エスクリ','サービス'], ['2198','アイ・ケイ・ケイ','サービス'], ['2201','森永製菓','食料品'], ['2204','中村屋','食料品'], ['2206','江崎グリコ','食料品'], ['2207','名糖産業','食料品'], ['2211','不二家','食料品'], ['2212','山崎製パン','食料品'], ['2215','第一屋製パン','食料品'], ['2217','モロゾフ','食料品'], ['2220','亀田製菓','食料品'], ['2222','寿スピリッツ','食料品'], ['2229','カルビー','食料品'], ['2264','森永乳業','食料品'], ['2266','六甲バター','食料品'], ['2267','ヤクルト本社','食料品'], ['2269','明治ホールディングス','食料品'], ['2270','雪印メグミルク','食料品'], ['2281','プリマハム','食料品'], ['2282','日本ハム','食料品'], 
['2286','林兼産業','食料品'], ['2288','丸大食品','食料品'], ['2292','S Foods','食料品'], ['2296','伊藤ハム米久ホールディングス','食料品'], ['2301','学情','サービス'], ['2305','スタジオアリス','サービス'], ['2309','シミックホールディングス','サービス'], ['2317','システナ','情報通信'], ['2325','NJS','サービス'], ['2326','デジタルアーツ','情報通信'], ['2327','新日鉄住金ソリューションズ','情報通信'], ['2331','綜合警備保障','サービス'], ['2335','キューブシステム','情報通信'], ['2337','いちごグループホールディングス','サービス'], ['2353','日本駐車場開発','不動産'], ['2359','コア','情報通信'], ['2371','カカクコム','サービス'], ['2372','アイロムグループ','サービス'], ['2378','ルネサンス','サービス'], ['2379','ディップ','サービス'], ['2384','SBSホールディングス','陸運'], ['2389','オプトホールディング','サービス'], ['2395','新日本科学','サービス'], ['2398','ツクイ','サービス'], ['2410','キャリアデザインセンター','サービス'], ['2413','エムスリー','サービス'], ['2418','ツカダ・グローバルホールディング','サービス'], ['2427','アウトソーシング','サービス'], ['2428','ウェルネット','サービス'], ['2432','ディー・エヌ・エー','サービス'], ['2433','博報堂DYホールディングス','サービス'], ['2440','ぐるなび','サービス'], ['2445','エスアールジータカミヤ','サービス'], ['2453','ジャパンベストレスキューシステム','サービス'], ['2461','ファンコミュニケーションズ','サービス'], ['2462','ジェイコムホールディングス','サービス'], ['2475','WDBホールディングス','サービス'], ['2485','ティア','サービス'], ['2491','バリューコマース','サービス'], ['2492','インフォマート','サービス'], ['2501','サッポロホールディングス','食料品'], ['2502','アサヒグループホールディングス','食料品'], ['2503','キリンホールディングス','食料品'], ['2531','宝ホールディングス','食料品'], ['2533','オエノンホールディングス','食料品'], ['2540','養命酒製造','食料品'], ['2579','コカ・コーラウエスト','食料品'], ['2580','コカ・コーライーストジャパン','食料品'], ['2587','サントリー食品インターナショナル','食料品'], ['2590','ダイドードリンコ','食料品'], ['2593','伊藤園','食料品'], ['2594','キーコーヒー','食料品'], ['2597','ユニカフェ','食料品'], ['2599','ジャパンフーズ','食料品'], ['2602','日清オイリオグループ','食料品'], ['2607','不二製油グループ本社','食料品'], ['2612','かどや製油','食料品'], ['2613','J-オイルミルズ','食料品'], ['2651','ローソン','小売'], ['2659','サンエー','小売'], ['2662','ダイユーエイト','小売'], ['2664','カワチ薬品','小売'], ['2670','エービーシー・マート','小売'], ['2674','ハードオフコーポレーション','小売'], ['2676','高千穂交易','卸売'], ['2678','アスクル','小売'], ['2681','ゲオホールディングス','小売'], ['2685','アダストリア','小売'], ['2686','ジーフット','小売'], ['2687','シー・ヴイ・エス・ベイエリア','小売'], ['2692','伊藤忠食品','卸売'], ['2695','くらコーポレーション','小売'], ['2698','キャンドゥ','小売'], 
['2715','エレマテック','卸売'], ['2726','パル','小売'], ['2729','JALUX','卸売'], ['2730','エディオン','小売'], ['2733','あらた','卸売'], ['2734','サーラコーポレーション','小売'], ['2735','ワッツ','小売'], ['2737','トーメンデバイス','卸売'], ['2742','ハローズ','小売'], ['2749','JPホールディングス','サービス'], ['2753','あみやき亭','小売'], ['2760','東京エレクトロンデバイス','卸売'], ['2764','ひらまつ','小売'], ['2767','フィールズ','卸売'], ['2768','双日','卸売'], ['2772','ゲンキー','小売'], ['2784','アルフレッサホールディングス','卸売'], ['2786','サッポロドラッグストアー','小売'], ['2791','大黒天物産','小売'], ['2792','ハニーズ','小売'], ['2796','ファーマライズホールディングス','小売'], ['2801','キッコーマン','食料品'], ['2802','味の素','食料品'], ['2809','キユーピー','食料品'], ['2810','ハウス食品グループ本社','食料品'], ['2811','カゴメ','食料品'], ['2812','焼津水産化学工業','食料品'], ['2815','アリアケジャパン','食料品'], ['2818','ピエトロ','食料品'], ['2819','エバラ食品工業','食料品'], ['2871','ニチレイ','食料品'], ['2874','横浜冷凍','卸売'], ['2875','東洋水産','食料品'], ['2882','イートアンド','食料品'], ['2897','日清食品ホールディングス','食料品'], ['2899','永谷園ホールディングス','食料品'], ['2904','一正蒲鉾','食料品'], ['2908','フジッコ','食料品'], ['2910','ロック・フィールド','食料品'], ['2914','日本たばこ産業','食料品'], ['2915','ケンコーマヨネーズ','食料品'], ['2918','わらべや日洋','食料品'], ['2922','なとり','食料品'], ['2930','北の達人コーポレーション','食料品'], ['2931','ユーグレナ','食料品'], ['3001','片倉工業','繊維製品'], ['3002','グンゼ','繊維製品'], ['3003','ヒューリック','不動産'], ['3004','神栄','卸売'], ['3022','山下医科器械','卸売'], ['3023','ラサ商事','卸売'], ['3028','アルペン','小売'], ['3031','ラクーン','卸売'], ['3034','クオール','小売'], ['3036','アルコニックス','卸売'], ['3038','神戸物産','卸売'], ['3046','ジェイアイエヌ','小売'], ['3048','ビックカメラ','小売'], ['3050','DCMホールディングス','小売'], ['3064','MonotaRO','小売'], ['3067','東京一番フーズ','小売'], ['3073','ダイヤモンドダイニング','小売'], ['3076','あいホールディングス','卸売'], ['3079','ディーブイエックス','卸売'], ['3082','きちり','小売'], ['3085','アークランドサービス','小売'], ['3086','J.フロントリテイリング','小売'], ['3087','ドトール・日レスホールディングス','小売'], ['3088','マツモトキヨシホールディングス','小売'], ['3091','ブロンコビリー','小売'], ['3092','スタートトゥデイ','小売'], ['3093','トレジャー・ファクトリー','小売'], ['3097','物語コーポレーション','小売'], ['3098','ココカラファイン','小売'], ['3099','三越伊勢丹ホールディングス','小売'], ['3101','東洋紡','繊維製品'], ['3103','ユニチカ','繊維製品'], ['3104','富士紡ホールディングス','繊維製品'], 
['3105','日清紡ホールディングス','繊維製品'], ['3106','倉敷紡績','繊維製品'], ['3107','ダイワボウホールディングス','卸売'], ['3109','シキボウ','繊維製品'], ['3110','日東紡績','ガラス土石'], ['3116','トヨタ紡織','輸送用機器'], ['3132','マクニカ・富士エレホールディングス','卸売'], ['3141','ウエルシアホールディングス','小売'], ['3148','クリエイトSDホールディングス','小売'], ['3151','バイタルケーエスケー・ホールディングス','卸売'], ['3153','八洲電機','卸売'], ['3156','UKCホールディングス','卸売'], ['3159','丸善CHIホールディングス','小売'], ['3166','OCHIホールディングス','卸売'], ['3167','TOKAIホールディングス','卸売'], ['3169','ミサワ','小売'], ['3175','エー・ピーカンパニー','小売'], ['3176','三洋貿易','卸売'], ['3178','チムニー','小売'], ['3179','シュッピン','小売'], ['3183','ウイン・パートナーズ','卸売'], ['3186','ネクステージ','小売'], ['3191','ジョイフル本田','小売'], ['3193','鳥貴族','小売'], ['3194','キリン堂ホールディングス','小売'], ['3196','ホットランド','小売'], ['3197','すかいらーく','小売'], ['3199','綿半ホールディングス','小売'], ['3201','日本毛織','繊維製品'], ['3202','大東紡織','繊維製品'], ['3204','トーア紡コーポレーション','繊維製品'], ['3205','ダイドーリミテッド','繊維製品'], ['3222','ユナイテッド・スーパーマーケット・ホールディングス','小売'], ['3228','三栄建築設計','不動産'], ['3231','野村不動産ホールディングス','不動産'], ['3232','三重交通グループホールディングス','不動産'], ['3244','サムティ','不動産'], ['3245','ディア・ライフ','不動産'], ['3250','エー・ディー・ワークス','不動産'], ['3252','日本商業開発','不動産'], ['3254','プレサンスコーポレーション','不動産'], ['3258','ユニゾホールディングス','不動産'], ['3276','日本管理センター','不動産'], ['3277','サンセイランディック','不動産'], ['3280','エストラスト','不動産'], ['3284','フージャースホールディングス','不動産'], ['3288','オープンハウス','不動産'], ['3289','東急不動産ホールディングス','不動産'], ['3291','飯田グループホールディングス','不動産'], ['3299','ムゲンエステート','不動産'], ['3302','帝国繊維','繊維製品'], ['3313','ブックオフコーポレーション','小売'], ['3315','日本コークス工業','石油・石炭'], ['3319','ゴルフダイジェスト・オンライン','小売'], ['3321','ミタチ産業','卸売'], ['3333','あさひ','小売'], ['3341','日本調剤','小売'], ['3349','コスモス薬品','小売'], ['3360','シップヘルスケアホールディングス','卸売'], ['3361','トーエル','小売'], ['3366','一六堂','小売'], ['3371','ソフトクリエイトホールディングス','情報通信'], ['3382','セブン&アイ・ホールディングス','小売'], ['3385','薬王堂','小売'], ['3387','クリエイト・レストランツ・ホールディングス','小売'], ['3388','明治電機工業','卸売'], ['3391','ツルハホールディングス','小売'], ['3392','デリカフーズ','卸売'], ['3393','スターティア','卸売'], ['3395','サンマルクホールディングス','小売'], ['3396','フェリシモ','小売'], ['3397','トリドール','小売'], 
['3398','クスリのアオキ','小売'], ['3401','帝人','繊維製品'], ['3402','東レ','繊維製品'], ['3405','クラレ','化学'], ['3407','旭化成','化学'], ['3408','サカイオーベックス','繊維製品'], ['3421','稲葉製作所','金属製品'], ['3431','宮地エンジニアリンググループ','金属製品'], ['3433','トーカロ','金属製品'], ['3434','アルファ','金属製品'], ['3436','SUMCO','金属製品'], ['3443','川田テクノロジーズ','金属製品'], ['3458','シーアールイー','不動産'], ['3501','住江織物','繊維製品'], ['3512','日本フエルト','繊維製品'], ['3513','イチカワ','繊維製品'], ['3521','エコナックホールディングス','不動産'], ['3524','日東製網','繊維製品'], ['3526','芦森工業','繊維製品'], ['3529','アツギ','繊維製品'], ['3551','ダイニック','繊維製品'], ['3553','共和レザー','化学'], ['3569','セーレン','繊維製品'], ['3577','東海染工','繊維製品'], ['3580','小松精練','繊維製品'], ['3591','ワコールホールディングス','繊維製品'], ['3593','ホギメディカル','繊維製品'], ['3606','レナウン','繊維製品'], ['3607','クラウディア','繊維製品'], ['3608','TSIホールディングス','繊維製品'], ['3626','ITホールディングス','情報通信'], ['3627','ネオス','情報通信'], ['3630','電算システム','情報通信'], ['3632','グリー','情報通信'], ['3635','コーエーテクモホールディングス','情報通信'], ['3636','三菱総合研究所','情報通信'], ['3639','ボルテージ','情報通信'], ['3640','電算','情報通信'], ['3648','AGS','情報通信'], ['3649','ファインデックス','情報通信'], ['3654','ヒト・コミュニケーションズ','情報通信'], ['3655','ブレインパッド','情報通信'], ['3656','KLab','情報通信'], ['3657','ポールトゥウィン・ピットクルーホールディングス','情報通信'], ['3658','イーブックイニシアティブジャパン','情報通信'], ['3659','ネクソン','情報通信'], ['3660','アイスタイル','情報通信'], ['3661','エムアップ','情報通信'], ['3662','エイチーム','情報通信'], ['3666','テクノスジャパン','情報通信'], ['3667','enish','情報通信'], ['3668','コロプラ','情報通信'], ['3669','モバイルクリエイト','情報通信'], ['3672','オルトプラス','情報通信'], ['3673','ブロードリーフ','情報通信'], ['3676','ハーツユナイテッドグループ','情報通信'], ['3678','メディアドゥ','情報通信'], ['3681','ブイキューブ','情報通信'], ['3683','サイバーリンクス','情報通信'], ['3686','ディー・エル・イー','情報通信'], ['3688','VOYAGEグループ','情報通信'], ['3694','オプティム','情報通信'], ['3708','特種東海製紙','パルプ・紙'], ['3724','ベリサーブ','情報通信'], ['3738','ティーガイア','情報通信'], ['3751','日本アジアグループ','情報通信'], ['3756','豆蔵ホールディングス','情報通信'], ['3762','テクマトリックス','情報通信'], ['3765','ガンホー・オンライン・エンターテイメント','情報通信'], ['3769','GMOペイメントゲートウェイ','情報通信'], ['3770','ザッパラス','情報通信'], ['3774','インターネットイニシアティブ','情報通信'], ['3778','さくらインターネット','情報通信'], ['3788','GMOクラウド','情報通信'], 
['3817','SRAホールディングス','情報通信'], ['3822','Minoriソリューションズ','情報通信'], ['3826','システムインテグレータ','情報通信'], ['3834','朝日ネット','情報通信'], ['3844','コムチュア','情報通信'], ['3852','サイバーコム','情報通信'], ['3861','王子ホールディングス','パルプ・紙'], ['3863','日本製紙','パルプ・紙'], ['3864','三菱製紙','パルプ・紙'], ['3865','北越紀州製紙','パルプ・紙'], ['3877','中越パルプ工業','パルプ・紙'], ['3878','巴川製紙所','パルプ・紙'], ['3880','大王製紙','パルプ・紙'], ['3903','gumi','情報通信'], ['3919','パイプドHD','情報通信'], ['3941','レンゴー','パルプ・紙'], ['3946','トーモク','パルプ・紙'], ['3950','ザ・パック','パルプ・紙'], ['4004','昭和電工','化学'], ['4005','住友化学','化学'], ['4007','日本化成','化学'], ['4008','住友精化','化学'], ['4021','日産化学工業','化学'], ['4022','ラサ工業','化学'], ['4023','クレハ','化学'], ['4025','多木化学','化学'], ['4027','テイカ','化学'], ['4028','石原産業','化学'], ['4031','片倉コープアグリ','化学'], ['4033','日東エフシー','化学'], ['4041','日本曹達','化学'], ['4042','東ソー','化学'], ['4043','トクヤマ','化学'], ['4044','セントラル硝子','化学'], ['4045','東亞合成','化学'], ['4046','大阪ソーダ','化学'], ['4047','関東電化工業','化学'], ['4061','デンカ','化学'], ['4062','イビデン','電気機器'], ['4063','信越化学工業','化学'], ['4064','日本カーバイド工業','化学'], ['4078','堺化学工業','化学'], ['4088','エア・ウォーター','化学'], ['4091','大陽日酸','化学'], ['4092','日本化学工業','化学'], ['4095','日本パーカライジング','化学'], ['4097','高圧ガス工業','化学'], ['4098','チタン工業','化学'], ['4099','四国化成工業','化学'], ['4100','戸田工業','化学'], ['4109','ステラケミファ','化学'], ['4112','保土谷化学工業','化学'], ['4114','日本触媒','化学'], ['4116','大日精化工業','化学'], ['4118','カネカ','化学'], ['4151','協和発酵キリン','医薬品'], ['4182','三菱瓦斯化学','化学'], ['4183','三井化学','化学'], ['4185','JSR','化学'], ['4186','東京応化工業','化学'], ['4187','大阪有機化学工業','化学'], ['4188','三菱ケミカルホールディングス','化学'], ['4201','日本合成化学工業','化学'], ['4202','ダイセル','化学'], ['4203','住友ベークライト','化学'], ['4204','積水化学工業','化学'], ['4205','日本ゼオン','化学'], ['4206','アイカ工業','化学'], ['4208','宇部興産','化学'], ['4212','積水樹脂','化学'], ['4215','タキロン','化学'], ['4216','旭有機材','化学'], ['4217','日立化成','化学'], ['4218','ニチバン','化学'], ['4220','リケンテクノス','化学'], ['4221','大倉工業','化学'], ['4228','積水化成品工業','化学'], ['4229','群栄化学工業','化学'], ['4231','タイガースポリマー','化学'], ['4238','ミライアル','化学'], ['4245','ダイキアクシス','化学'], ['4246','ダイキョーニシカワ','化学'], 
['4272','日本化薬','化学'], ['4275','カーリットホールディングス','化学'], ['4282','EPSホールディングス','サービス'], ['4286','レッグス','サービス'], ['4290','プレステージ・インターナショナル','サービス'], ['4295','フェイス','情報通信'], ['4299','ハイマックス','情報通信'], ['4301','アミューズ','サービス'], ['4307','野村総合研究所','情報通信'], ['4310','ドリームインキュベータ','サービス'], ['4312','サイバネットシステム','情報通信'], ['4318','クイック','サービス'], ['4319','TAC','サービス'], ['4320','CEホールディングス','情報通信'], ['4321','ケネディクス','サービス'], ['4324','電通','サービス'], ['4326','インテージホールディングス','情報通信'], ['4331','テイクアンドギヴ・ニーズ','サービス'], ['4333','東邦システムサイエンス','情報通信'], ['4337','ぴあ','サービス'], ['4343','イオンファンタジー','サービス'], ['4344','ソースネクスト','情報通信'], ['4345','シーティーエス','サービス'], ['4346','ネクシィーズグループ','サービス'], ['4350','メディカルシステムネットワーク','小売'], ['4358','ティー・ワイ・オー','情報通信'], ['4362','日本精化','化学'], ['4368','扶桑化学工業','化学'], ['4401','ADEKA','化学'], ['4403','日油','化学'], ['4404','ミヨシ油脂','食料品'], ['4406','新日本理化','化学'], ['4410','ハリマ化成グループ','化学'], ['4452','花王','化学'], ['4461','第一工業製薬','化学'], ['4465','ニイタカ','化学'], ['4471','三洋化成工業','化学'], ['4502','武田薬品工業','医薬品'], ['4503','アステラス製薬','医薬品'], ['4506','大日本住友製薬','医薬品'], ['4507','塩野義製薬','医薬品'], ['4508','田辺三菱製薬','医薬品'], ['4512','わかもと製薬','医薬品'], ['4514','あすか製薬','医薬品'], ['4516','日本新薬','医薬品'], ['4517','ビオフェルミン製薬','医薬品'], ['4519','中外製薬','医薬品'], ['4521','科研製薬','医薬品'], ['4523','エーザイ','医薬品'], ['4526','理研ビタミン','食料品'], ['4527','ロート製薬','医薬品'], ['4528','小野薬品工業','医薬品'], ['4530','久光製薬','医薬品'], ['4531','有機合成薬品工業','医薬品'], ['4534','持田製薬','医薬品'], ['4536','参天製薬','医薬品'], ['4538','扶桑薬品工業','医薬品'], ['4539','日本ケミファ','医薬品'], ['4540','ツムラ','医薬品'], ['4541','日医工','医薬品'], ['4543','テルモ','精密機器'], ['4544','みらかホールディングス','サービス'], ['4547','キッセイ薬品工業','医薬品'], ['4548','生化学工業','医薬品'], ['4549','栄研化学','医薬品'], ['4550','日水製薬','医薬品'], ['4551','鳥居薬品','医薬品'], ['4552','JCRファーマ','医薬品'], ['4553','東和薬品','医薬品'], ['4554','富士製薬工業','医薬品'], ['4555','沢井製薬','医薬品'], ['4559','ゼリア新薬工業','医薬品'], ['4568','第一三共','医薬品'], ['4569','キョーリン製薬ホールディングス','医薬品'], ['4574','大幸薬品','医薬品'], ['4577','ダイト','医薬品'], ['4578','大塚ホールディングス','医薬品'], ['4581','大正製薬ホールディングス','医薬品'], 
['4587','ペプチドリーム','医薬品'], ['4611','大日本塗料','化学'], ['4612','日本ペイントホールディングス','化学'], ['4613','関西ペイント','化学'], ['4615','神東塗料','化学'], ['4617','中国塗料','化学'], ['4619','日本特殊塗料','化学'], ['4620','藤倉化成','化学'], ['4626','太陽ホールディングス','化学'], ['4631','DIC','化学'], ['4633','サカタインクス','化学'], ['4634','東洋インキSCホールディングス','化学'], ['4636','T&K TOKA','化学'], ['4641','アルプス技研','サービス'], ['4651','サニックス','サービス'], ['4653','ダイオーズ','サービス'], ['4658','日本空調サービス','サービス'], ['4661','オリエンタルランド','サービス'], ['4662','フォーカスシステムズ','情報通信'], ['4665','ダスキン','サービス'], ['4666','パーク24','不動産'], ['4668','明光ネットワークジャパン','サービス'], ['4671','ファルコホールディングス','サービス'], ['4674','クレスコ','情報通信'], ['4676','フジ・メディア・ホールディングス','情報通信'], ['4678','秀英予備校','サービス'], ['4679','田谷','サービス'], ['4680','ラウンドワン','サービス'], ['4681','リゾートトラスト','サービス'], ['4684','オービック','情報通信'], ['4686','ジャストシステム','情報通信'], ['4687','TDCソフトウェアエンジニアリング','情報通信'], ['4689','ヤフー','情報通信'], ['4694','ビー・エム・エル','サービス'], ['4696','ワタベウェディング','サービス'], ['4704','トレンドマイクロ','情報通信'], ['4708','りらいあコミュニケーションズ','サービス'], ['4709','インフォメーション・ディベロプメント','情報通信'], ['4714','リソー教育','サービス'], ['4716','日本オラクル','情報通信'], ['4718','早稲田アカデミー','サービス'], ['4719','アルファシステムズ','情報通信'], ['4722','フューチャー','情報通信'], ['4725','CACホールディングス','情報通信'], ['4726','ソフトバンク・テクノロジー','情報通信'], ['4728','トーセ','情報通信'], ['4732','ユー・エス・エス','サービス'], ['4733','オービックビジネスコンサルタント','情報通信'], ['4739','伊藤忠テクノソリューションズ','情報通信'], ['4743','アイティフォー','情報通信'], ['4745','東京個別指導学院','サービス'], ['4746','東計電算','情報通信'], ['4751','サイバーエージェント','サービス'], ['4755','楽天','サービス'], ['4762','エックスネット','情報通信'], ['4767','テー・オー・ダブリュー','サービス'], ['4768','大塚商会','情報通信'], ['4775','総合メディカル','小売'], ['4776','サイボウズ','情報通信'], ['4779','ソフトブレーン','情報通信'], ['4801','セントラルスポーツ','サービス'], ['4809','パラカ','不動産'], ['4812','電通国際情報サービス','情報通信'], ['4819','デジタルガレージ','情報通信'], ['4820','イーエムシステムズ','情報通信'], ['4825','ウェザーニューズ','情報通信'], ['4826','CIJ','情報通信'], ['4828','東洋ビジネスエンジニアリング','情報通信'], ['4829','日本エンタープライズ','情報通信'], ['4839','WOWOW','情報通信'], ['4845','フュージョンパートナー','情報通信'], ['4848','フルキャストホールディングス','サービス'], 
['4850','ザ・ダウ・ケミカル・カンパニー','化学'], ['4901','富士フイルムホールディングス','化学'], ['4902','コニカミノルタ','電気機器'], ['4911','資生堂','化学'], ['4912','ライオン','化学'], ['4914','高砂香料工業','化学'], ['4917','マンダム','化学'], ['4919','ミルボン','化学'], ['4921','ファンケル','化学'], ['4922','コーセー','化学'], ['4923','コタ','化学'], ['4924','シーズ・ホールディングス','化学'], ['4926','シーボン','化学'], ['4927','ポーラ・オルビスホールディングス','化学'], ['4928','ノエビアホールディングス','化学'], ['4929','アジュバンコスメジャパン','化学'], ['4951','エステー','化学'], ['4955','アグロカネショウ','化学'], ['4956','コニシ','化学'], ['4958','長谷川香料','化学'], ['4963','星光PMC','化学'], ['4967','小林製薬','化学'], ['4968','荒川化学工業','化学'], ['4971','メック','化学'], ['4973','日本高純度化学','化学'], ['4974','タカラバイオ','化学'], ['4975','JCU','化学'], ['4977','新田ゼラチン','化学'], ['4979','OATアグリオ','化学'], ['4980','デクセリアルズ','化学'], ['4985','アース製薬','化学'], ['4989','イハラケミカル工業','化学'], ['4992','北興化学工業','化学'], ['4994','大成ラミック','化学'], ['4996','クミアイ化学工業','化学'], ['4997','日本農薬','化学'], ['5002','昭和シェル石油','石油・石炭'], ['5009','富士興産','卸売'], ['5011','ニチレキ','石油・石炭'], ['5012','東燃ゼネラル石油','石油・石炭'], ['5013','ユシロ化学工業','石油・石炭'], ['5015','ビーピー・カストロール','石油・石炭'], ['5017','富士石油','石油・石炭'], ['5018','MORESCO','石油・石炭'], ['5019','出光興産','石油・石炭'], ['5020','JXホールディングス','石油・石炭'], ['5021','コスモエネルギーホールディングス','石油・石炭'], ['5101','横浜ゴム','ゴム製品'], ['5105','東洋ゴム工業','ゴム製品'], ['5108','ブリヂストン','ゴム製品'], ['5110','住友ゴム工業','ゴム製品'], ['5121','藤倉ゴム工業','ゴム製品'], ['5122','オカモト','ゴム製品'], ['5142','アキレス','化学'], ['5185','フコク','ゴム製品'], ['5186','ニッタ','ゴム製品'], ['5187','クリエートメディック','精密機器'], ['5191','住友理工','ゴム製品'], ['5192','三ツ星ベルト','ゴム製品'], ['5195','バンドー化学','ゴム製品'], ['5196','鬼怒川ゴム工業','輸送用機器'], ['5201','旭硝子','ガラス土石'], ['5202','日本板硝子','ガラス土石'], ['5204','石塚硝子','ガラス土石'], ['5208','有沢製作所','化学'], ['5210','日本山村硝子','ガラス土石'], ['5214','日本電気硝子','ガラス土石'], ['5218','オハラ','ガラス土石'], ['5232','住友大阪セメント','ガラス土石'], ['5233','太平洋セメント','ガラス土石'], ['5234','デイ・シイ','ガラス土石'], ['5261','リゾートソリューション','サービス'], ['5262','日本ヒューム','ガラス土石'], ['5269','日本コンクリート工業','ガラス土石'], ['5273','三谷セキサン','ガラス土石'], ['5288','アジアパイルホールディングス','ガラス土石'], ['5301','東海カーボン','ガラス土石'], 
['5302','日本カーボン','ガラス土石'], ['5310','東洋炭素','ガラス土石'], ['5331','ノリタケカンパニーリミテド','ガラス土石'], ['5332','TOTO','ガラス土石'], ['5333','日本碍子','ガラス土石'], ['5334','日本特殊陶業','ガラス土石'], ['5337','ダントーホールディングス','ガラス土石'], ['5344','MARUWA','ガラス土石'], ['5351','品川リフラクトリーズ','ガラス土石'], ['5352','黒崎播磨','ガラス土石'], ['5357','ヨータイ','ガラス土石'], ['5358','イソライト工業','ガラス土石'], ['5363','東京窯業','ガラス土石'], ['5367','ニッカトー','ガラス土石'], ['5384','フジミインコーポレーテッド','ガラス土石'], ['5391','エーアンドエーマテリアル','ガラス土石'], ['5393','ニチアス','ガラス土石'], ['5401','新日鐵住金','鉄鋼'], ['5406','神戸製鋼所','鉄鋼'], ['5408','中山製鋼所','鉄鋼'], ['5410','合同製鐵','鉄鋼'], ['5411','ジェイエフイーホールディングス','鉄鋼'], ['5413','日新製鋼','鉄鋼'], ['5423','東京製鐵','鉄鋼'], ['5440','共英製鋼','鉄鋼'], ['5444','大和工業','鉄鋼'], ['5445','東京鐵鋼','鉄鋼'], ['5449','大阪製鐵','鉄鋼'], ['5451','淀川製鋼所','鉄鋼'], ['5453','東洋鋼鈑','鉄鋼'], ['5463','丸一鋼管','鉄鋼'], ['5464','モリ工業','鉄鋼'], ['5471','大同特殊鋼','鉄鋼'], ['5476','日本高周波鋼業','鉄鋼'], ['5480','日本冶金工業','鉄鋼'], ['5481','山陽特殊製鋼','鉄鋼'], ['5482','愛知製鋼','鉄鋼'], ['5486','日立金属','鉄鋼'], ['5491','日本金属','鉄鋼'], ['5541','大平洋金属','鉄鋼'], ['5563','新日本電工','鉄鋼'], ['5602','栗本鐵工所','鉄鋼'], ['5603','虹技','鉄鋼'], ['5612','日本鋳鉄管','鉄鋼'], ['5631','日本製鋼所','機械'], ['5632','三菱製鋼','鉄鋼'], ['5658','日亜鋼業','鉄鋼'], ['5659','日本精線','鉄鋼'], ['5702','大紀アルミニウム工業所','非鉄金属'], ['5703','日本軽金属ホールディングス','非鉄金属'], ['5706','三井金属鉱業','非鉄金属'], ['5707','東邦亜鉛','非鉄金属'], ['5711','三菱マテリアル','非鉄金属'], ['5713','住友金属鉱山','非鉄金属'], ['5714','DOWAホールディングス','非鉄金属'], ['5715','古河機械金属','非鉄金属'], ['5721','エス・サイエンス','非鉄金属'], ['5726','大阪チタニウムテクノロジーズ','非鉄金属'], ['5727','東邦チタニウム','非鉄金属'], ['5741','UACJ','非鉄金属'], ['5801','古河電気工業','非鉄金属'], ['5802','住友電気工業','非鉄金属'], ['5803','フジクラ','非鉄金属'], ['5805','昭和電線ホールディングス','非鉄金属'], ['5807','東京特殊電線','非鉄金属'], ['5809','タツタ電線','非鉄金属'], ['5815','沖電線','非鉄金属'], ['5819','カナレ電気','非鉄金属'], ['5821','平河ヒューテック','非鉄金属'], ['5851','リョービ','非鉄金属'], ['5852','アーレスティ','非鉄金属'], ['5857','アサヒホールディングス','非鉄金属'], ['5901','東洋製罐グループホールディングス','金属製品'], ['5902','ホッカンホールディングス','金属製品'], ['5909','コロナ','金属製品'], ['5911','横河ブリッジホールディングス','金属製品'], ['5912','OSJBホールディングス','金属製品'], 
['5915','駒井ハルテック','金属製品'], ['5923','高田機工','金属製品'], ['5929','三和ホールディングス','金属製品'], ['5930','文化シヤッター','金属製品'], ['5932','三協立山','金属製品'], ['5933','アルインコ','金属製品'], ['5936','東洋シヤッター','金属製品'], ['5938','LIXILグループ','金属製品'], ['5942','日本フイルコン','金属製品'], ['5943','ノーリツ','金属製品'], ['5946','長府製作所','金属製品'], ['5947','リンナイ','金属製品'], ['5949','ユニプレス','輸送用機器'], ['5951','ダイニチ工業','金属製品'], ['5957','日東精工','金属製品'], ['5958','三洋工業','金属製品'], ['5959','岡部','金属製品'], ['5970','ジーテクト','金属製品'], ['5974','中国工業','金属製品'], ['5975','東プレ','金属製品'], ['5976','高周波熱錬','金属製品'], ['5981','東京製綱','金属製品'], ['5985','サンコール','金属製品'], ['5986','モリテックスチール','金属製品'], ['5988','パイオラックス','金属製品'], ['5991','日本発條','金属製品'], ['5992','中央発條','金属製品'], ['5998','アドバネクス','金属製品'], ['6005','三浦工業','機械'], ['6013','タクマ','機械'], ['6028','テクノプロ・ホールディングス','サービス'], ['6032','インターワークス','サービス'], ['6036','KeePer技研','サービス'], ['6037','ファーストロジック','サービス'], ['6054','リブセンス','サービス'], ['6055','ジャパンマテリアル','サービス'], ['6058','ベクトル','サービス'], ['6059','ウチヤマホールディングス','サービス'], ['6065','サクセスホールディングス','サービス'], ['6070','キャリアリンク','サービス'], ['6071','IBJ','サービス'], ['6073','アサンテ','サービス'], ['6077','N・フィールド','サービス'], ['6080','M&Aキャピタルパートナーズ','サービス'], ['6082','ライドオン・エクスプレス','サービス'], ['6083','ERIホールディングス','サービス'], ['6087','アビスト','サービス'], ['6089','ウィルグループ','サービス'], ['6097','日本ビューホテル','サービス'], ['6098','リクルートホールディングス','サービス'], ['6099','エラン','サービス'], ['6101','ツガミ','機械'], ['6103','オークマ','機械'], ['6104','東芝機械','機械'], ['6113','アマダホールディングス','機械'], ['6118','アイダエンジニアリング','機械'], ['6121','滝澤鉄工所','機械'], ['6134','富士機械製造','機械'], ['6135','牧野フライス製作所','機械'], ['6136','オーエスジー','機械'], ['6138','ダイジェット工業','機械'], ['6140','旭ダイヤモンド工業','機械'], ['6141','DMG森精機','機械'], ['6143','ソディック','機械'], ['6146','ディスコ','機械'], ['6151','日東工器','機械'], ['6165','パンチ工業','機械'], ['6178','日本郵政','サービス'], ['6183','ベルシステム24ホールディングス','サービス'], ['6201','豊田自動織機','輸送用機器'], ['6203','豊和工業','機械'], ['6205','OKK','機械'], ['6208','石川製作所','機械'], ['6210','東洋機械金属','機械'], ['6217','津田駒工業','機械'], ['6218','エンシュウ','機械'], ['6222','島精機製作所','機械'], 
['6236','NCホールディングス','機械'], ['6238','フリュー','機械'], ['6240','ヤマシンフィルタ','機械'], ['6247','日阪製作所','機械'], ['6250','やまびこ','機械'], ['6262','ペガサスミシン製造','機械'], ['6268','ナブテスコ','機械'], ['6269','三井海洋開発','機械'], ['6272','レオン自動機','機械'], ['6273','SMC','機械'], ['6274','新川','機械'], ['6277','ホソカワミクロン','機械'], ['6278','ユニオンツール','機械'], ['6282','オイレス工業','機械'], ['6284','日精エー・エス・ビー機械','機械'], ['6287','サトーホールディングス','機械'], ['6291','日本エアーテック','機械'], ['6293','日精樹脂工業','機械'], ['6294','オカダアイヨン','機械'], ['6298','ワイエイシイ','機械'], ['6301','小松製作所','機械'], ['6302','住友重機械工業','機械'], ['6305','日立建機','機械'], ['6306','日工','機械'], ['6309','巴工業','機械'], ['6310','井関農機','機械'], ['6315','TOWA','機械'], ['6316','丸山製作所','機械'], ['6317','北川鉄工所','機械'], ['6319','シンニッタン','鉄鋼'], ['6325','タカキタ','機械'], ['6326','クボタ','機械'], ['6328','荏原実業','機械'], ['6330','東洋エンジニアリング','建設'], ['6331','三菱化工機','機械'], ['6332','月島機械','機械'], ['6333','帝国電機製作所','機械'], ['6335','東京機械製作所','機械'], ['6339','新東工業','機械'], ['6340','澁谷工業','機械'], ['6345','アイチコーポレーション','機械'], ['6349','小森コーポレーション','機械'], ['6351','鶴見製作所','機械'], ['6355','住友精密工業','機械'], ['6358','酒井重工業','機械'], ['6361','荏原製作所','機械'], ['6362','石井鐵工所','機械'], ['6363','酉島製作所','機械'], ['6364','北越工業','機械'], ['6366','千代田化工建設','建設'], ['6367','ダイキン工業','機械'], ['6368','オルガノ','機械'], ['6369','トーヨーカネツ','機械'], ['6370','栗田工業','機械'], ['6371','椿本チエイン','機械'], ['6373','大同工業','機械'], ['6376','日機装','精密機器'], ['6378','木村化工機','機械'], ['6379','新興プランテック','建設'], ['6381','アネスト岩田','機械'], ['6383','ダイフク','機械'], ['6387','サムコ','機械'], ['6390','加藤製作所','機械'], ['6393','油研工業','機械'], ['6395','タダノ','機械'], ['6406','フジテック','機械'], ['6407','CKD','機械'], ['6409','キトー','機械'], ['6412','平和','機械'], ['6413','理想科学工業','機械'], ['6417','SANKYO','機械'], ['6418','日本金銭機械','機械'], ['6419','マースエンジニアリング','機械'], ['6420','福島工業','機械'], ['6428','オーイズミ','機械'], ['6430','ダイコク電機','機械'], ['6432','竹内製作所','機械'], ['6436','アマノ','機械'], ['6440','JUKI','機械'], ['6444','サンデンホールディングス','機械'], ['6445','蛇の目ミシン工業','機械'], ['6448','ブラザー工業','電気機器'], ['6454','マックス','機械'], ['6455','モリタホールディングス','輸送用機器'], 
['6457','グローリー','機械'], ['6458','新晃工業','機械'], ['6459','大和冷機工業','機械'], ['6460','セガサミーホールディングス','機械'], ['6461','日本ピストンリング','機械'], ['6462','リケン','機械'], ['6463','TPR','機械'], ['6464','ツバキ・ナカシマ','機械'], ['6465','ホシザキ電機','機械'], ['6470','大豊工業','機械'], ['6471','日本精工','機械'], ['6472','NTN','機械'], ['6473','ジェイテクト','機械'], ['6474','不二越','機械'], ['6479','ミネベア','電気機器'], ['6480','日本トムソン','機械'], ['6481','THK','機械'], ['6482','ユーシン精機','機械'], ['6485','前澤給装工業','機械'], ['6486','イーグル工業','機械'], ['6489','前澤工業','機械'], ['6490','日本ピラー工業','機械'], ['6498','キッツ','機械'], ['6501','日立製作所','電気機器'], ['6502','東芝','電気機器'], ['6503','三菱電機','電気機器'], ['6504','富士電機','電気機器'], ['6505','東洋電機製造','電気機器'], ['6506','安川電機','電気機器'], ['6507','シンフォニアテクノロジー','電気機器'], ['6508','明電舎','電気機器'], ['6513','オリジン電気','電気機器'], ['6516','山洋電気','電気機器'], ['6517','デンヨー','電気機器'], ['6581','日立工機','機械'], ['6584','三櫻工業','輸送用機器'], ['6586','マキタ','機械'], ['6588','東芝テック','電気機器'], ['6590','芝浦メカトロニクス','電気機器'], ['6592','マブチモーター','電気機器'], ['6594','日本電産','電気機器'], ['6615','ユー・エム・シー・エレクトロニクス','電気機器'], ['6617','東光高岳','電気機器'], ['6619','ダブル・スコープ','電気機器'], ['6620','宮越ホールディングス','電気機器'], ['6622','ダイヘン','電気機器'], ['6624','田淵電機','電気機器'], ['6630','ヤーマン','電気機器'], ['6632','JVCケンウッド','電気機器'], ['6638','ミマキエンジニアリング','電気機器'], ['6640','第一精工','電気機器'], ['6641','日新電機','電気機器'], ['6644','大崎電気工業','電気機器'], ['6645','オムロン','電気機器'], ['6651','日東工業','電気機器'], ['6652','IDEC','電気機器'], ['6654','不二電機工業','電気機器'], ['6674','ジーエス・ユアサコーポレーション','電気機器'], ['6675','サクサホールディングス','電気機器'], ['6676','メルコホールディングス','電気機器'], ['6678','テクノメディカ','電気機器'], ['6701','日本電気','電気機器'], ['6702','富士通','電気機器'], ['6703','沖電気工業','電気機器'], ['6704','岩崎通信機','電気機器'], ['6706','電気興業','電気機器'], ['6707','サンケン電気','電気機器'], ['6715','ナカヨ','電気機器'], ['6718','アイホン','電気機器'], ['6723','ルネサスエレクトロニクス','電気機器'], ['6724','セイコーエプソン','電気機器'], ['6727','ワコム','電気機器'], ['6728','アルバック','電気機器'], ['6730','アクセル','電気機器'], ['6737','EIZO','電気機器'], ['6740','ジャパンディスプレイ','電気機器'], ['6741','日本信号','電気機器'], ['6742','京三製作所','電気機器'], ['6744','能美防災','電気機器'], 
['6745','ホーチキ','電気機器'], ['6750','エレコム','電気機器'], ['6751','日本無線','電気機器'], ['6752','パナソニック','電気機器'], ['6753','シャープ','電気機器'], ['6754','アンリツ','電気機器'], ['6755','富士通ゼネラル','電気機器'], ['6756','日立国際電気','電気機器'], ['6758','ソニー','電気機器'], ['6762','TDK','電気機器'], ['6763','帝国通信工業','電気機器'], ['6767','ミツミ電機','電気機器'], ['6768','タムラ製作所','電気機器'], ['6770','アルプス電気','電気機器'], ['6771','池上通信機','電気機器'], ['6773','パイオニア','電気機器'], ['6779','日本電波工業','電気機器'], ['6785','鈴木','電気機器'], ['6788','日本トリム','電気機器'], ['6789','ローランドディー.ジー.','電気機器'], ['6791','日本コロムビア','情報通信'], ['6794','フォスター電機','電気機器'], ['6796','クラリオン','電気機器'], ['6798','SMK','電気機器'], ['6800','ヨコオ','電気機器'], ['6803','ティアック','電気機器'], ['6804','ホシデン','電気機器'], ['6806','ヒロセ電機','電気機器'], ['6807','日本航空電子工業','電気機器'], ['6809','TOA','電気機器'], ['6810','日立マクセル','電気機器'], ['6814','古野電気','電気機器'], ['6815','ユニデンホールディングス','電気機器'], ['6816','アルパイン','電気機器'], ['6817','スミダコーポレーション','電気機器'], ['6820','アイコム','電気機器'], ['6823','リオン','電気機器'], ['6826','本多通信工業','電気機器'], ['6839','船井電機','電気機器'], ['6841','横河電機','電気機器'], ['6844','新電元工業','電気機器'], ['6845','アズビル','電気機器'], ['6848','東亜ディーケーケー','電気機器'], ['6849','日本光電工業','電気機器'], ['6850','チノー','電気機器'], ['6853','共和電業','電気機器'], ['6855','日本電子材料','電気機器'], ['6856','堀場製作所','電気機器'], ['6857','アドバンテスト','電気機器'], ['6858','小野測器','電気機器'], ['6859','エスペック','電気機器'], ['6860','パナソニックデバイスSUNX','電気機器'], ['6861','キーエンス','電気機器'], ['6866','日置電機','電気機器'], ['6869','シスメックス','電気機器'], ['6871','日本マイクロニクス','電気機器'], ['6875','メガチップス','電気機器'], ['6877','OBARA GROUP','電気機器'], ['6879','イマジカ・ロボットホールディングス','情報通信'], ['6901','澤藤電機','電気機器'], ['6902','デンソー','輸送用機器'], ['6905','コーセル','電気機器'], ['6908','イリソ電子工業','電気機器'], ['6911','新日本無線','電気機器'], ['6914','オプテックス','電気機器'], ['6915','千代田インテグレ','電気機器'], ['6916','アイ・オー・データ機器','電気機器'], ['6920','レーザーテック','電気機器'], ['6923','スタンレー電気','電気機器'], ['6924','岩崎電気','電気機器'], ['6925','ウシオ電機','電気機器'], ['6926','岡谷電機産業','電気機器'], ['6927','ヘリオステクノホールディング','電気機器'], ['6929','日本セラミック','電気機器'], ['6932','遠藤照明','電気機器'], ['6935','日本デジタル研究所','電気機器'], ['6937','古河電池','電気機器'], 
['6938','双信電機','電気機器'], ['6941','山一電機','電気機器'], ['6947','図研','電気機器'], ['6951','日本電子','電気機器'], ['6952','カシオ計算機','電気機器'], ['6954','ファナック','電気機器'], ['6958','日本シイエムケイ','電気機器'], ['6961','エンプラス','電気機器'], ['6962','大真空','電気機器'], ['6963','ローム','電気機器'], ['6965','浜松ホトニクス','電気機器'], ['6966','三井ハイテック','電気機器'], ['6967','新光電気工業','電気機器'], ['6971','京セラ','電気機器'], ['6973','協栄産業','卸売'], ['6976','太陽誘電','電気機器'], ['6981','村田製作所','電気機器'], ['6985','ユーシン','電気機器'], ['6986','双葉電子工業','電気機器'], ['6988','日東電工','化学'], ['6989','北陸電気工業','電気機器'], ['6995','東海理化電機製作所','輸送用機器'], ['6996','ニチコン','電気機器'], ['6997','日本ケミコン','電気機器'], ['6999','KOA','電気機器'], ['7003','三井造船','輸送用機器'], ['7004','日立造船','機械'], ['7011','三菱重工業','機械'], ['7012','川崎重工業','輸送用機器'], ['7013','IHI','機械'], ['7014','名村造船所','輸送用機器'], ['7022','サノヤスホールディングス','輸送用機器'], ['7102','日本車輌製造','輸送用機器'], ['7105','ニチユ三菱フォークリフト','輸送用機器'], ['7122','近畿車輛','輸送用機器'], ['7148','FPG','証券・先物'], ['7150','島根銀行','銀行'], ['7161','じもとホールディングス','銀行'], ['7164','全国保証','その他金融'], ['7167','足利ホールディングス','銀行'], ['7173','東京TYフィナンシャルグループ','銀行'], ['7180','九州フィナンシャルグループ','銀行'], ['7181','かんぽ生命保険','保険'], ['7182','ゆうちょ銀行','銀行'], ['7184','富山第一銀行','銀行'], ['7186','コンコルディア・フィナンシャルグループ','銀行'], ['7201','日産自動車','輸送用機器'], ['7202','いすゞ自動車','輸送用機器'], ['7203','トヨタ自動車','輸送用機器'], ['7205','日野自動車','輸送用機器'], ['7211','三菱自動車工業','輸送用機器'], ['7212','エフテック','輸送用機器'], ['7213','レシップホールディングス','輸送用機器'], ['7214','GMB','輸送用機器'], ['7215','ファルテック','輸送用機器'], ['7220','武蔵精密工業','輸送用機器'], ['7222','日産車体','輸送用機器'], ['7224','新明和工業','輸送用機器'], ['7226','極東開発工業','輸送用機器'], ['7230','日信工業','輸送用機器'], ['7231','トピー工業','輸送用機器'], ['7236','ティラド','輸送用機器'], ['7238','曙ブレーキ工業','輸送用機器'], ['7239','タチエス','輸送用機器'], ['7240','NOK','輸送用機器'], ['7241','フタバ産業','輸送用機器'], ['7242','KYB','輸送用機器'], ['7244','市光工業','電気機器'], ['7245','大同メタル工業','輸送用機器'], ['7246','プレス工業','輸送用機器'], ['7247','ミクニ','輸送用機器'], ['7248','カルソニックカンセイ','輸送用機器'], ['7250','太平洋工業','輸送用機器'], ['7251','ケーヒン','輸送用機器'], ['7256','河西工業','輸送用機器'], ['7259','アイシン精機','輸送用機器'], ['7260','富士機工','輸送用機器'], 
['7261','マツダ','輸送用機器'], ['7262','ダイハツ工業','輸送用機器'], ['7266','今仙電機製作所','輸送用機器'], ['7267','本田技研工業','輸送用機器'], ['7269','スズキ','輸送用機器'], ['7270','富士重工業','輸送用機器'], ['7271','安永','輸送用機器'], ['7272','ヤマハ発動機','輸送用機器'], ['7274','ショーワ','輸送用機器'], ['7276','小糸製作所','電気機器'], ['7277','TBK','輸送用機器'], ['7278','エクセディ','輸送用機器'], ['7280','ミツバ','電気機器'], ['7282','豊田合成','輸送用機器'], ['7283','愛三工業','輸送用機器'], ['7284','盟和産業','輸送用機器'], ['7294','ヨロズ','輸送用機器'], ['7296','エフ・シー・シー','輸送用機器'], ['7305','新家工業','鉄鋼'], ['7309','シマノ','輸送用機器'], ['7312','タカタ','輸送用機器'], ['7313','テイ・エステック','輸送用機器'], ['7408','ジャムコ','輸送用機器'], ['7414','小野建','卸売'], ['7416','はるやま商事','小売'], ['7420','佐鳥電機','卸売'], ['7421','カッパ・クリエイト','小売'], ['7427','エコートレーディング','卸売'], ['7433','伯東','卸売'], ['7438','コンドーテック','卸売'], ['7442','中山福','卸売'], ['7445','ライトオン','小売'], ['7447','ナガイレーベン','卸売'], ['7448','ジーンズメイト','小売'],
# ---------------------------------------------------------------------------
# SWIG-generated proxy classes for the OpenCASCADE (OCC) TopTools module.
# Each Python class below is a thin wrapper that forwards every call to the
# corresponding C extension function in `_TopTools`; the trailing
# `new_instancemethod(...)` assignments rebind the methods to the C-level
# implementations, and `*_swigregister` registers the proxy with SWIG.
# Do not edit by hand — regenerate from the SWIG interface instead.
# NOTE(review): the first statement below is the right-hand side of an
# assignment whose target (`TopTools_DataMapOfShapeListOfInteger.Set =`)
# lies before this chunk.
new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_Set,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_ReSize,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_Clear,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_Bind,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_IsBound,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_UnBind,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.Find = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_Find,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_ChangeFind,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_Find1,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_ChangeFind1,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger__kill_pointed,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger_swigregister = _TopTools.TopTools_DataMapOfShapeListOfInteger_swigregister
TopTools_DataMapOfShapeListOfInteger_swigregister(TopTools_DataMapOfShapeListOfInteger)


# Hash map from TopoDS_Shape keys to TopTools_ListOfShape values.
class TopTools_DataMapOfShapeListOfShape(OCC.TCollection.TCollection_BasicMap):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(NbBuckets: int = 1) -> None"""
        _TopTools.TopTools_DataMapOfShapeListOfShape_swiginit(self,_TopTools.new_TopTools_DataMapOfShapeListOfShape(*args))
    def Assign(self, *args):
        """Assign(Other: TopTools_DataMapOfShapeListOfShape &) -> TopTools_DataMapOfShapeListOfShape"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_Assign(self, *args)
    def Set(self, *args):
        """Set(Other: TopTools_DataMapOfShapeListOfShape &) -> TopTools_DataMapOfShapeListOfShape"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_Set(self, *args)
    def ReSize(self, *args):
        """ReSize(NbBuckets: int) -> None"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_ReSize(self, *args)
    def Clear(self, *args):
        """Clear() -> None"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_Clear(self, *args)
    def Bind(self, *args):
        """Bind(K: TopoDS_Shape &, I: TopTools_ListOfShape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_Bind(self, *args)
    def IsBound(self, *args):
        """IsBound(K: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_IsBound(self, *args)
    def UnBind(self, *args):
        """UnBind(K: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_UnBind(self, *args)
    def Find(self, *args):
        """Find(K: TopoDS_Shape &) -> TopTools_ListOfShape"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_Find(self, *args)
    def ChangeFind(self, *args):
        """ChangeFind(K: TopoDS_Shape &) -> TopTools_ListOfShape"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_ChangeFind(self, *args)
    def Find1(self, *args):
        """Find1(K: TopoDS_Shape &) -> Standard_Address"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_Find1(self, *args)
    def ChangeFind1(self, *args):
        """ChangeFind1(K: TopoDS_Shape &) -> Standard_Address"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfShapeListOfShape self)"""
        return _TopTools.TopTools_DataMapOfShapeListOfShape__kill_pointed(self)
    def __del__(self):
        # Hand ownership back to the OCC garbage collector; swallow any
        # teardown-order errors during interpreter shutdown.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
TopTools_DataMapOfShapeListOfShape.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_Assign,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.Set = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_Set,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_ReSize,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_Clear,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_Bind,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_IsBound,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_UnBind,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.Find = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_Find,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_ChangeFind,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_Find1,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape_ChangeFind1,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfShape__kill_pointed,None,TopTools_DataMapOfShapeListOfShape)
TopTools_DataMapOfShapeListOfShape_swigregister = _TopTools.TopTools_DataMapOfShapeListOfShape_swigregister
TopTools_DataMapOfShapeListOfShape_swigregister(TopTools_DataMapOfShapeListOfShape)


# Hash map from TopoDS_Shape keys to float (Standard_Real) values.
class TopTools_DataMapOfShapeReal(OCC.TCollection.TCollection_BasicMap):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(NbBuckets: int = 1) -> None"""
        _TopTools.TopTools_DataMapOfShapeReal_swiginit(self,_TopTools.new_TopTools_DataMapOfShapeReal(*args))
    def Assign(self, *args):
        """Assign(Other: TopTools_DataMapOfShapeReal &) -> TopTools_DataMapOfShapeReal"""
        return _TopTools.TopTools_DataMapOfShapeReal_Assign(self, *args)
    def Set(self, *args):
        """Set(Other: TopTools_DataMapOfShapeReal &) -> TopTools_DataMapOfShapeReal"""
        return _TopTools.TopTools_DataMapOfShapeReal_Set(self, *args)
    def ReSize(self, *args):
        """ReSize(NbBuckets: int) -> None"""
        return _TopTools.TopTools_DataMapOfShapeReal_ReSize(self, *args)
    def Clear(self, *args):
        """Clear() -> None"""
        return _TopTools.TopTools_DataMapOfShapeReal_Clear(self, *args)
    def Bind(self, *args):
        """Bind(K: TopoDS_Shape &, I: float &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeReal_Bind(self, *args)
    def IsBound(self, *args):
        """IsBound(K: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeReal_IsBound(self, *args)
    def UnBind(self, *args):
        """UnBind(K: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeReal_UnBind(self, *args)
    def Find(self, *args):
        """Find(K: TopoDS_Shape &) -> float"""
        return _TopTools.TopTools_DataMapOfShapeReal_Find(self, *args)
    def ChangeFind(self, *args):
        """ChangeFind(K: TopoDS_Shape &) -> float"""
        return _TopTools.TopTools_DataMapOfShapeReal_ChangeFind(self, *args)
    def Find1(self, *args):
        """Find1(K: TopoDS_Shape &) -> Standard_Address"""
        return _TopTools.TopTools_DataMapOfShapeReal_Find1(self, *args)
    def ChangeFind1(self, *args):
        """ChangeFind1(K: TopoDS_Shape &) -> Standard_Address"""
        return _TopTools.TopTools_DataMapOfShapeReal_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfShapeReal self)"""
        return _TopTools.TopTools_DataMapOfShapeReal__kill_pointed(self)
    def __del__(self):
        # Hand ownership back to the OCC garbage collector.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
TopTools_DataMapOfShapeReal.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_Assign,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.Set = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_Set,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_ReSize,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_Clear,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_Bind,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_IsBound,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_UnBind,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.Find = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_Find,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_ChangeFind,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_Find1,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal_ChangeFind1,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfShapeReal__kill_pointed,None,TopTools_DataMapOfShapeReal)
TopTools_DataMapOfShapeReal_swigregister = _TopTools.TopTools_DataMapOfShapeReal_swigregister
TopTools_DataMapOfShapeReal_swigregister(TopTools_DataMapOfShapeReal)


# Hash map from TopoDS_Shape keys to TopTools_SequenceOfShape values.
class TopTools_DataMapOfShapeSequenceOfShape(OCC.TCollection.TCollection_BasicMap):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(NbBuckets: int = 1) -> None"""
        _TopTools.TopTools_DataMapOfShapeSequenceOfShape_swiginit(self,_TopTools.new_TopTools_DataMapOfShapeSequenceOfShape(*args))
    def Assign(self, *args):
        """Assign(Other: TopTools_DataMapOfShapeSequenceOfShape &) -> TopTools_DataMapOfShapeSequenceOfShape"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_Assign(self, *args)
    def Set(self, *args):
        """Set(Other: TopTools_DataMapOfShapeSequenceOfShape &) -> TopTools_DataMapOfShapeSequenceOfShape"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_Set(self, *args)
    def ReSize(self, *args):
        """ReSize(NbBuckets: int) -> None"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_ReSize(self, *args)
    def Clear(self, *args):
        """Clear() -> None"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_Clear(self, *args)
    def Bind(self, *args):
        """Bind(K: TopoDS_Shape &, I: TopTools_SequenceOfShape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_Bind(self, *args)
    def IsBound(self, *args):
        """IsBound(K: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_IsBound(self, *args)
    def UnBind(self, *args):
        """UnBind(K: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_UnBind(self, *args)
    def Find(self, *args):
        """Find(K: TopoDS_Shape &) -> TopTools_SequenceOfShape"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_Find(self, *args)
    def ChangeFind(self, *args):
        """ChangeFind(K: TopoDS_Shape &) -> TopTools_SequenceOfShape"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_ChangeFind(self, *args)
    def Find1(self, *args):
        """Find1(K: TopoDS_Shape &) -> Standard_Address"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_Find1(self, *args)
    def ChangeFind1(self, *args):
        """ChangeFind1(K: TopoDS_Shape &) -> Standard_Address"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfShapeSequenceOfShape self)"""
        return _TopTools.TopTools_DataMapOfShapeSequenceOfShape__kill_pointed(self)
    def __del__(self):
        # Hand ownership back to the OCC garbage collector.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
TopTools_DataMapOfShapeSequenceOfShape.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_Assign,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.Set = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_Set,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_ReSize,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_Clear,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_Bind,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_IsBound,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_UnBind,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.Find = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_Find,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_ChangeFind,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_Find1,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape_ChangeFind1,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfShapeSequenceOfShape__kill_pointed,None,TopTools_DataMapOfShapeSequenceOfShape)
TopTools_DataMapOfShapeSequenceOfShape_swigregister = _TopTools.TopTools_DataMapOfShapeSequenceOfShape_swigregister
TopTools_DataMapOfShapeSequenceOfShape_swigregister(TopTools_DataMapOfShapeSequenceOfShape)


# Hash map from TopoDS_Shape keys to TopoDS_Shape values.
class TopTools_DataMapOfShapeShape(OCC.TCollection.TCollection_BasicMap):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(NbBuckets: int = 1) -> None"""
        _TopTools.TopTools_DataMapOfShapeShape_swiginit(self,_TopTools.new_TopTools_DataMapOfShapeShape(*args))
    def Assign(self, *args):
        """Assign(Other: TopTools_DataMapOfShapeShape &) -> TopTools_DataMapOfShapeShape"""
        return _TopTools.TopTools_DataMapOfShapeShape_Assign(self, *args)
    def Set(self, *args):
        """Set(Other: TopTools_DataMapOfShapeShape &) -> TopTools_DataMapOfShapeShape"""
        return _TopTools.TopTools_DataMapOfShapeShape_Set(self, *args)
    def ReSize(self, *args):
        """ReSize(NbBuckets: int) -> None"""
        return _TopTools.TopTools_DataMapOfShapeShape_ReSize(self, *args)
    def Clear(self, *args):
        """Clear() -> None"""
        return _TopTools.TopTools_DataMapOfShapeShape_Clear(self, *args)
    def Bind(self, *args):
        """Bind(K: TopoDS_Shape &, I: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeShape_Bind(self, *args)
    def IsBound(self, *args):
        """IsBound(K: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeShape_IsBound(self, *args)
    def UnBind(self, *args):
        """UnBind(K: TopoDS_Shape &) -> bool"""
        return _TopTools.TopTools_DataMapOfShapeShape_UnBind(self, *args)
    def Find(self, *args):
        """Find(K: TopoDS_Shape &) -> TopoDS_Shape"""
        return _TopTools.TopTools_DataMapOfShapeShape_Find(self, *args)
    def ChangeFind(self, *args):
        """ChangeFind(K: TopoDS_Shape &) -> TopoDS_Shape"""
        return _TopTools.TopTools_DataMapOfShapeShape_ChangeFind(self, *args)
    def Find1(self, *args):
        """Find1(K: TopoDS_Shape &) -> Standard_Address"""
        return _TopTools.TopTools_DataMapOfShapeShape_Find1(self, *args)
    def ChangeFind1(self, *args):
        """ChangeFind1(K: TopoDS_Shape &) -> Standard_Address"""
        return _TopTools.TopTools_DataMapOfShapeShape_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfShapeShape self)"""
        return _TopTools.TopTools_DataMapOfShapeShape__kill_pointed(self)
    def __del__(self):
        # Hand ownership back to the OCC garbage collector.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
TopTools_DataMapOfShapeShape.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_Assign,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.Set = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_Set,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_ReSize,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_Clear,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_Bind,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_IsBound,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_UnBind,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.Find = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_Find,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_ChangeFind,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_Find1,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape_ChangeFind1,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfShapeShape__kill_pointed,None,TopTools_DataMapOfShapeShape)
TopTools_DataMapOfShapeShape_swigregister = _TopTools.TopTools_DataMapOfShapeShape_swigregister
TopTools_DataMapOfShapeShape_swigregister(TopTools_DataMapOfShapeShape)


# Handle-managed 1D array of TopTools_ListOfShape (bounds Low..Up inclusive).
class TopTools_HArray1OfListOfShape(OCC.MMgt.MMgt_TShared):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(Low: int, Up: int) -> None
        __init__(Low: int, Up: int, V: TopTools_ListOfShape &) -> None"""
        _TopTools.TopTools_HArray1OfListOfShape_swiginit(self,_TopTools.new_TopTools_HArray1OfListOfShape(*args))
    def Init(self, *args):
        """Init(V: TopTools_ListOfShape &) -> None"""
        return _TopTools.TopTools_HArray1OfListOfShape_Init(self, *args)
    def Length(self, *args):
        """Length() -> int"""
        return _TopTools.TopTools_HArray1OfListOfShape_Length(self, *args)
    def Lower(self, *args):
        """Lower() -> int"""
        return _TopTools.TopTools_HArray1OfListOfShape_Lower(self, *args)
    def Upper(self, *args):
        """Upper() -> int"""
        return _TopTools.TopTools_HArray1OfListOfShape_Upper(self, *args)
    def SetValue(self, *args):
        """SetValue(Index: int, Value: TopTools_ListOfShape &) -> None"""
        return _TopTools.TopTools_HArray1OfListOfShape_SetValue(self, *args)
    def Value(self, *args):
        """Value(Index: int) -> TopTools_ListOfShape"""
        return _TopTools.TopTools_HArray1OfListOfShape_Value(self, *args)
    def ChangeValue(self, *args):
        """ChangeValue(Index: int) -> TopTools_ListOfShape"""
        return _TopTools.TopTools_HArray1OfListOfShape_ChangeValue(self, *args)
    def Array1(self, *args):
        """Array1() -> TopTools_Array1OfListOfShape"""
        return _TopTools.TopTools_HArray1OfListOfShape_Array1(self, *args)
    def ChangeArray1(self, *args):
        """ChangeArray1() -> TopTools_Array1OfListOfShape"""
        return _TopTools.TopTools_HArray1OfListOfShape_ChangeArray1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_HArray1OfListOfShape self)"""
        return _TopTools.TopTools_HArray1OfListOfShape__kill_pointed(self)
    def GetHandle(self):
        """GetHandle(TopTools_HArray1OfListOfShape self) -> Handle_TopTools_HArray1OfListOfShape"""
        return _TopTools.TopTools_HArray1OfListOfShape_GetHandle(self)
    def __del__(self):
        # Hand ownership back to the OCC garbage collector.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
TopTools_HArray1OfListOfShape.Init = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_Init,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.Length = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_Length,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.Lower = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_Lower,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.Upper = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_Upper,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.SetValue = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_SetValue,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.Value = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_Value,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.ChangeValue = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_ChangeValue,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.Array1 = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_Array1,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.ChangeArray1 = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_ChangeArray1,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape._kill_pointed = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape__kill_pointed,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape.GetHandle = new_instancemethod(_TopTools.TopTools_HArray1OfListOfShape_GetHandle,None,TopTools_HArray1OfListOfShape)
TopTools_HArray1OfListOfShape_swigregister = _TopTools.TopTools_HArray1OfListOfShape_swigregister
TopTools_HArray1OfListOfShape_swigregister(TopTools_HArray1OfListOfShape)


# Reference-counted OCC handle wrapping TopTools_HArray1OfListOfShape.
class Handle_TopTools_HArray1OfListOfShape(OCC.MMgt.Handle_MMgt_TShared):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        _TopTools.Handle_TopTools_HArray1OfListOfShape_swiginit(self,_TopTools.new_Handle_TopTools_HArray1OfListOfShape(*args))
    # Static down-cast from a base handle type.
    DownCast = staticmethod(_TopTools.Handle_TopTools_HArray1OfListOfShape_DownCast)
    def __del__(self):
        # Hand ownership back to the OCC garbage collector.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
Handle_TopTools_HArray1OfListOfShape.Nullify = new_instancemethod(_TopTools.Handle_TopTools_HArray1OfListOfShape_Nullify,None,Handle_TopTools_HArray1OfListOfShape)
Handle_TopTools_HArray1OfListOfShape.IsNull = new_instancemethod(_TopTools.Handle_TopTools_HArray1OfListOfShape_IsNull,None,Handle_TopTools_HArray1OfListOfShape)
Handle_TopTools_HArray1OfListOfShape.GetObject = new_instancemethod(_TopTools.Handle_TopTools_HArray1OfListOfShape_GetObject,None,Handle_TopTools_HArray1OfListOfShape)
# NOTE(review): the assignment below is cut off here; its right-hand side
# continues past this chunk.
Handle_TopTools_HArray1OfListOfShape._kill_pointed =
# <reponame>artofimagination/stereo-calibration-and-vSLAM
import os
import numpy as np
import glob
import shutil
from pathlib import Path

from backend import Backend, States, Modes
from pointCloudGLWidget import PointCloudGLWidget
from linePlotWidget import LinePlotWidget

from PyQt5 import QtCore
from PyQt5.QtCore import QThread
from PyQt5.QtWidgets import QMainWindow, QGridLayout, QAction
from PyQt5.QtWidgets import QSpinBox, QLabel, QHBoxLayout, QFileDialog
from PyQt5.QtWidgets import QWidget, QApplication, QCheckBox
from PyQt5.QtWidgets import QPushButton, QTabWidget, QVBoxLayout
from PyQt5.QtWidgets import QDoubleSpinBox, QComboBox, QGroupBox


# Deletes all files (but not subdirectories) matching the content glob.
def _clearContent(content):
    # FIX: the original ran this identical glob/remove pass twice in a row;
    # the second pass was a no-op because the first had already removed
    # every matching file.
    for f in glob.glob(content):
        if not os.path.isdir(f):
            os.remove(f)


# Calibration widget.
# The only reason it exists is to catch the press-'n' key event and
# re-emit it as a Qt signal for the backend to take a calibration image.
class CalibWidget(QWidget):
    takeImageTriggered = QtCore.pyqtSignal()

    def keyPressEvent(self, event):
        super(CalibWidget, self).keyPressEvent(event)
        if event.key() == QtCore.Qt.Key_N:
            self.takeImageTriggered.emit()


# Main Qt UI window.
class MainWindow(QMainWindow):
    # Folder that contains the saved UI settings.
    SETTINGS_DIR = "settings"

    def __init__(self, *args, **kwargs):
        """Build the worker thread, all UI tabs and show the window maximized."""
        super(MainWindow, self).__init__(*args, **kwargs)

        # Initialize worker thread related members.
        self.workerThread = QThread(self)
        self.worker = Backend()
        self.worker.signals.framesSent.connect(self.updateVideo)
        self.worker.signals.finished.connect(self.thread_complete)
        self.worker.signals.error.connect(self.sigint_handler)

        # init UI.
        self.setMinimumSize(200, 100)
        mainLayout = QGridLayout()
        self._createMenu()

        # Create tabs; each tab gets its own layout built by a helper.
        tabwidget = QTabWidget()
        calibratorLayout = self._createCalibrationUI()
        # CalibWidget (not a plain QWidget) so the 'n' key press is caught.
        self.calibratorLayoutWidget = CalibWidget()
        self.calibratorLayoutWidget.setLayout(calibratorLayout)
        tabwidget.addTab(self.calibratorLayoutWidget, "Sensor calibration")
        bmConfiguratorLayout = self._createBlockMatchingConfiguratorUI()
        bmConfiguratorLayoutWidget = QWidget()
        bmConfiguratorLayoutWidget.setLayout(bmConfiguratorLayout)
        tabwidget.addTab(
            bmConfiguratorLayoutWidget,
            "Block Matching Configurator")
        featureDetectorLayout = self._createFeatureDetectionUI()
        featureDetectionLayoutWidget = QWidget()
        featureDetectionLayoutWidget.setLayout(featureDetectorLayout)
        tabwidget.addTab(featureDetectionLayoutWidget, "Feature detection")
        motionEstimationLayout = self._createMotionEstimationUI()
        motionEstimationLayoutWidget = QWidget()
        motionEstimationLayoutWidget.setLayout(motionEstimationLayout)
        tabwidget.addTab(motionEstimationLayoutWidget, "Motion estimation")
        vSlamUILayout = self._createVSlamUI()
        vSlamUILayoutWidget = QWidget()
        vSlamUILayoutWidget.setLayout(vSlamUILayout)
        tabwidget.addTab(vSlamUILayoutWidget, "Visual SLAM")
        self._initUIElements()

        mainLayout.addWidget(tabwidget, 0, 0)
        mainWidget = QWidget()
        mainWidget.setLayout(mainLayout)
        self.setCentralWidget(mainWidget)

        # Size the window to fill the screen.
        desktop = QApplication.desktop()
        screenRect = desktop.screenGeometry()
        self.resize(screenRect.width(), screenRect.height())

        # Make sure the settings folder exists before anything saves to it.
        if not os.path.isdir(self.SETTINGS_DIR):
            os.mkdir(self.SETTINGS_DIR)
        self.show()

    # Saves all UI values into an npz file.
# Saves all calibration images and chessboard.npz for each sensor # in the folder named identical to the settings npz file def _saveValues(self, settingsName): np.savez_compressed( settingsName, bm_textureThreshold=self.textureThreshold.value(), bm_min_disp=self.min_disp.value(), bm_num_disp=self.num_disp.value(), bm_blocksize=self.blockSize.value(), bm_uniquenessRatio=self.uniquenessRatio.value(), bm_speckleWindowSize=self.speckleWindowSize.value(), bm_speckleRange=self.speckleRange.value(), bm_disp12MaxDiff=self.disp12MaxDiff.value(), bm_preFilterType=self.preFilterType.value(), bm_preFilterSize=self.preFilterSize.value(), bm_preFilterCap=self.preFilterCap.value(), bm_smallerBlockSize=self.smallerBlockSize.value(), bm_mode=self.blockMatching.currentIndex(), bm_drawEpipolar=self.drawEpipolar.isChecked(), bm_resolution=self.resolutionBm.currentIndex(), bm_leftCameraIndex=self.bmCameraIndexLeft.currentIndex(), bm_rightCameraIndex=self.bmCameraIndexLeft.currentIndex(), pc_fov=self.fov.value(), pc_samplingRatio=self.samplingRatio.value(), pc_ignoreRendererMaxDepth=self.rendererMaxDepth.value(), cal_calib_image_index=self.calib_image_index.value(), cal_rms_limit=self.rms_limit.value(), cal_advanced=self.advanced.isChecked(), cal_ignoreExitingImageData=self .ignoreExistingImageData .isChecked(), cal_rms_increment=self.increment.value(), cal_max_rms=self.max_rms.value(), cal_resolution=self.resolutionCal.currentIndex(), cal_leftCameraIndex=self.calibCameraIndexLeft.currentIndex(), cal_rightCameraIndex=self.calibCameraIndexRight.currentIndex(), feat_featureDetector=self.featureDetector.currentIndex(), feat_featureMatcher=self.featureMatcher.currentIndex(), feat_maxDistance=self.maxDistance.value(), motion_inliers=self.inliers.value(), motion_maxDepth=self.maxDepth.value(), motion_reprojectionError=self.reprojectionError.value()) settingsPath = Path(settingsName).with_suffix('') settingsPath = settingsPath.stem files = glob.glob(f"calibImages/left/{str(settingsPath)}/*") 
for f in files: os.remove(f) files = glob.glob(f"calibImages/right/{str(settingsPath)}/*") for f in files: os.remove(f) leftDirectory = f"calibImages/left/{str(settingsPath)}" if not os.path.isdir(leftDirectory): os.mkdir(leftDirectory) files = glob.glob("calibImages/left/*") for f in files: if not os.path.isdir(f): shutil.copy(f, leftDirectory) rightDirectory = f"calibImages/right/{str(settingsPath)}" if not os.path.isdir(rightDirectory): os.mkdir(rightDirectory) files = glob.glob("calibImages/right/*") for f in files: if not os.path.isdir(f): shutil.copy(f, rightDirectory) # Saves UI settings and calibration images/data # Also creates a lastSaved folder # for quick loading last saved info when the application starts. def saveSettings(self): options = QFileDialog.Options() options |= QFileDialog.DontUseNativeDialog fileName, _ = QFileDialog.getSaveFileName( self, "QFileDialog.getSaveFileName()", "", "npz (*.npz)", options=options) if fileName: self._saveValues(fileName) _clearContent("calibImages/left/lastSaved/*") _clearContent("calibImages/left/lastSaved/*") self._saveValues(f"{self.SETTINGS_DIR}/lastSaved.npz") # Loads and sets settings values from the npz file. # Also loads the appropriate calib images and data. 
def _setLoadedValues(self, settingsName): _clearContent("calibImages/left/*") _clearContent("calibImages/right/*") settings = np.load(settingsName) self.textureThreshold.setValue(settings["bm_textureThreshold"]) self.min_disp.setValue(settings["bm_min_disp"]) self.num_disp.setValue(settings["bm_num_disp"]) self.blockSize.setValue(settings["bm_blocksize"]) self.uniquenessRatio.setValue(settings["bm_uniquenessRatio"]) self.speckleWindowSize.setValue( settings["bm_speckleWindowSize"]) self.speckleRange.setValue(settings["bm_speckleRange"]) self.disp12MaxDiff.setValue(settings["bm_disp12MaxDiff"]) self.preFilterType.setValue(settings["bm_preFilterType"]) self.preFilterSize.setValue(settings["bm_preFilterSize"]) self.preFilterCap.setValue(settings["bm_preFilterCap"]) self.blockMatching.setCurrentIndex(settings["bm_mode"]) self.drawEpipolar.setChecked(bool(settings["bm_drawEpipolar"])) self.smallerBlockSize.setValue(settings["bm_smallerBlockSize"]) self.resolutionBm.setCurrentIndex(settings["bm_resolution"]) self.bmCameraIndexLeft.setCurrentIndex(settings["bm_leftCameraIndex"]) self.bmCameraIndexRight.setCurrentIndex(settings["bm_rightCameraIndex"]) self.fov.setValue(settings["pc_fov"]) self.samplingRatio.setValue(settings["pc_samplingRatio"]) self.rendererMaxDepth.setValue(settings["pc_ignoreRendererMaxDepth"]) self.calib_image_index.setValue(settings["cal_calib_image_index"]) self.rms_limit.setValue(settings["cal_rms_limit"]) self.advanced.setChecked(bool(settings["cal_advanced"])) self.ignoreExistingImageData.setChecked( bool(settings["cal_ignoreExitingImageData"])) self.increment.setValue(settings["cal_rms_increment"]) self.max_rms.setValue(settings["cal_max_rms"]) self.resolutionCal.setCurrentIndex(settings["cal_resolution"]) self.calibCameraIndexLeft.setCurrentIndex(settings["cal_leftCameraIndex"]) self.calibCameraIndexRight.setCurrentIndex(settings["cal_rightCameraIndex"]) self.featureDetector.setCurrentIndex(settings["feat_featureDetector"]) 
self.featureMatcher.setCurrentIndex(settings["feat_featureMatcher"]) self.maxDistance.setValue(settings["feat_maxDistance"]) self.inliers.setValue(settings["motion_inliers"]) self.maxDepth.setValue(settings["motion_maxDepth"]) self.reprojectionError.setValue(settings["motion_reprojectionError"]) settingsPath = Path(settingsName).with_suffix('') settingsPath = settingsPath.stem directory = f"calibImages/left/{str(settingsPath)}" if os.path.isdir(str(directory)): files = glob.glob(f"{directory}/*") for f in files: shutil.copy(f, "calibImages/left/") else: print( f"No left calib images to load \ (calibImages/left/{str(settingsPath)})") directory = f"calibImages/right/{str(settingsPath)}" if os.path.isdir(str(directory)): files = glob.glob(f"{directory}/*") for f in files: shutil.copy(f, "calibImages/right/") else: print(f"No left calib images to load \ (calibImages/left/{str(settingsPath)})") # Loads settings,calib images and data. def loadSettings(self): options = QFileDialog.Options() options |= QFileDialog.DontUseNativeDialog fileName, _ = QFileDialog.getOpenFileName( self, "QFileDialog.getOpenFileName()", "", "npz (*.npz)", options=options) if fileName: try: self._setLoadedValues(fileName) except IOError: print("Settings file at {0} not found" .format(fileName)) self.sigint_handler() # Creates the menu items. 
    # Creates the menu items.
    def _createMenu(self):
        """Build the File menu: save settings, load settings and exit."""
        saveAction = QAction('&Save Settings', self)
        saveAction.setShortcut('Ctrl+S')
        saveAction.setStatusTip('Save settings')
        saveAction.triggered.connect(self.saveSettings)
        loadAction = QAction('&Load Settings', self)
        loadAction.setShortcut('Ctrl+L')
        loadAction.setStatusTip('Load settings')
        loadAction.triggered.connect(self.loadSettings)
        exitAction = QAction('&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        # sigint_handler doubles as the application's shutdown hook
        exitAction.triggered.connect(self.sigint_handler)
        # Create menu bar and add action
        menuBar = self.menuBar()
        fileMenu = menuBar.addMenu('&File')
        fileMenu.addAction(saveAction)
        fileMenu.addAction(loadAction)
        fileMenu.addAction(exitAction)

    # Creates the calibration tab UI.
    def _createCalibrationUI(self):
        """Assemble and return the QGridLayout for the calibration tab:
        help text, an options row, tuning spin boxes, the two camera views
        and the Start/Process/Take-image buttons."""
        layout = QGridLayout()
        helpLabel = QLabel("Quick user guide:\n\
1. To start calibration interface, press start.\n\
2. Once started, to capture a pair of frames, press 'n' or 'Take image'\
 when your sensor's are in the desired position.\n\
3. When enough calib images are create press process, to create the final\
 calibration file.\n\n\
Use modes:\n\
a, Simple (Default): capture as many calibration images as you want, \
when done press 'Process'.\n\
b, Advanced: during capture the system will analyse the calibration RMS\
 and will throw away if there is ahigh RMS result. See more in README.md\n\
When finished press 'Process'")
        calibInfoLabel = QLabel("Calibration info")
        self.calibInfo = QLabel()
        RMSLabel = QLabel("RMS")
        self.RMSValue = QLabel()
        # column of informational labels (guide, calib info, RMS readout)
        labelLayout = QVBoxLayout()
        labelLayout.addWidget(helpLabel)
        labelLayout.addWidget(calibInfoLabel)
        labelLayout.addWidget(self.calibInfo)
        labelLayout.addWidget(RMSLabel)
        labelLayout.addWidget(self.RMSValue)
        labelsLayoutWidget = QWidget()
        labelsLayoutWidget.setLayout(labelLayout)
        self.resolutionCal = QComboBox()
        self.resolutionCal.addItems(["480p", "720p"])
        ignoreExistingImageDataLabel = QLabel("Ignore existing image data")
        self.ignoreExistingImageData = QCheckBox()
        # disabled at construction; NOTE(review): presumably enabled
        # elsewhere once calibration starts — not visible in this chunk
        self.ignoreExistingImageData.setDisabled(True)
        advancedLabel = QLabel("Advanced use mode")
        self.advanced = QCheckBox()
        self.advanced.setDisabled(True)
        # options row: resolution + the two checkboxes
        optionsLayout = QHBoxLayout()
        optionsLayout.addWidget(QLabel("Resolution"))
        optionsLayout.addWidget(self.resolutionCal)
        optionsLayout.addWidget(ignoreExistingImageDataLabel)
        optionsLayout.addWidget(self.ignoreExistingImageData)
        optionsLayout.addWidget(advancedLabel)
        optionsLayout.addWidget(self.advanced)
        optionsLayoutWidget = QWidget()
        optionsLayoutWidget.setLayout(optionsLayout)
        # numeric tuning controls for the calibration process
        self.calib_image_index = QSpinBox()
        self.rms_limit = QDoubleSpinBox()
        self.increment = QDoubleSpinBox()
        self.max_rms = QDoubleSpinBox()
        configLayout = QHBoxLayout()
        configLayout.addWidget(QLabel("calib_image_index"))
        configLayout.addWidget(self.calib_image_index)
        configLayout.addWidget(QLabel("rms_limit"))
        configLayout.addWidget(self.rms_limit)
        configLayout.addWidget(QLabel("increment"))
        configLayout.addWidget(self.increment)
        configLayout.addWidget(QLabel("max_rms"))
        configLayout.addWidget(self.max_rms)
        configLayoutWidget = QWidget()
        configLayoutWidget.setLayout(configLayout)
        self.calib_image_index.setRange(0, 1000)
        self.calib_image_index.setSingleStep(1)
        self.rms_limit.setRange(0, 10.0)
        self.rms_limit.setDecimals(5)
        self.rms_limit.setSingleStep(0.005)
        self.increment.setRange(0, 1.0)
        self.increment.setDecimals(5)
        self.increment.setSingleStep(0.005)
        self.max_rms.setRange(0, 10.0)
        self.max_rms.setDecimals(5)
        self.max_rms.setSingleStep(0.005)
        # left/right camera preview area
        cameraLayout = QGridLayout()
        displayGroupBox = QGroupBox("Visualisation")
        displayGroupBox.setLayout(cameraLayout)
        self.video0Calib = QLabel()
        self.calibSwapCameras = QPushButton("Swap lenses")
        self.calibCameraIndexLeft = QComboBox()
        cameraLayout.addWidget(self.video0Calib, 0, 0, 4, 3)
        cameraLayout.addWidget(self.calibSwapCameras, 5, 0, 1, 1)
        cameraLayout.addWidget(QLabel("Left camera indices"), 5, 1, 1, 1)
        cameraLayout.addWidget(self.calibCameraIndexLeft, 5, 2, 1, 1)
        self.video1Calib = QLabel()
        self.calibCameraIndexRight = QComboBox()
        cameraLayout.addWidget(self.video1Calib, 0, 3, 4, 3)
        cameraLayout.addWidget(QLabel("Right camera indices"), 5, 3, 1, 1)
        cameraLayout.addWidget(self.calibCameraIndexRight, 5, 4, 1, 1)
        layout.addWidget(displayGroupBox, 0, 0, 1, 1)
        # action buttons; Process/Take-image stay hidden until Start
        start = QPushButton("Start")
        self.process = QPushButton("Process")
        self.process.hide()
        self.takeImage = QPushButton("Take image")
        self.takeImage.hide()
        start.clicked.connect(self._startCalibration)
        buttonLayout = QHBoxLayout()
        buttonLayout.addWidget(start)
        buttonLayout.addWidget(self.process)
        buttonLayout.addWidget(self.takeImage)
        buttonLayoutWidget = QWidget()
        buttonLayoutWidget.setLayout(buttonLayout)
        layout.addWidget(labelsLayoutWidget, 1, 0, 1, 2)
        layout.addWidget(optionsLayoutWidget, 2, 0, 1, 4)
        layout.addWidget(configLayoutWidget, 3, 0, 1, 4)
        layout.addWidget(buttonLayoutWidget, 4, 0, 1, 4)
        return layout

    # Creates the block matching UI.
    def _createBlockMatchingConfiguratorUI(self):
        """Assemble the block-matching configuration tab: BM/SGBM tuning
        spin boxes, camera previews, disparity view and the point-cloud
        renderer controls.

        NOTE(review): this definition is truncated at the end of this
        source chunk (mid ``bmControlLayout.addWidget`` call); the
        remainder is not visible here.
        """
        # NOTE(review): pyqtSignal objects assigned to *instance*
        # attributes are not functional signals in PyQt (signals must be
        # class attributes); these look vestigial — confirm they are unused.
        self.win_sizeUpdated = QtCore.pyqtSignal(int)
        self.min_dispUpdated = QtCore.pyqtSignal(int)
        self.num_dispUpdated = QtCore.pyqtSignal(int)
        self.blockSizeUpdated = QtCore.pyqtSignal(int)
        self.uniquenessRatioUpdated = QtCore.pyqtSignal(int)
        self.speckleWindowSizeUpdated = QtCore.pyqtSignal(int)
        self.speckleRangeUpdated = QtCore.pyqtSignal(int)
        self.disp12MaxDiffUpdated = QtCore.pyqtSignal(int)
        layout = QGridLayout()
        # spin boxes for every OpenCV block-matching parameter
        self.textureThresholdLabel = QLabel("textureThreshold")
        self.textureThreshold = QSpinBox()
        self.min_disp = QSpinBox()
        self.num_disp = QSpinBox()
        self.blockSize = QSpinBox()
        self.uniquenessRatio = QSpinBox()
        self.speckleWindowSize = QSpinBox()
        self.speckleRange = QSpinBox()
        self.disp12MaxDiff = QSpinBox()
        self.preFilterSizeLabel = QLabel("preFilterSize")
        self.preFilterSize = QSpinBox()
        self.preFilterTypeLabel = QLabel("preFilterType")
        self.preFilterType = QSpinBox()
        self.preFilterCapLabel = QLabel("preFilterCap")
        self.preFilterCap = QSpinBox()
        self.smallerBlockSizeLabel = QLabel("smallerBlockSize")
        self.smallerBlockSize = QSpinBox()
        self.start = QPushButton("Start")
        self.start.clicked.connect(self._startBMConfiguration)
        # Init spin boxes
        self.textureThreshold.setRange(0, 10000)
        self.min_disp.setRange(-1, 1000)
        self.min_disp.setSingleStep(1)
        # num_disp steps by 16 (OpenCV requires numDisparities % 16 == 0)
        self.num_disp.setRange(16, 1000)
        self.num_disp.setSingleStep(16)
        # blockSize steps by 2 from an odd minimum, keeping it odd
        self.blockSize.setRange(5, 255)
        self.blockSize.setSingleStep(2)
        self.smallerBlockSize.setRange(-1, 1000000)
        self.smallerBlockSize.setSingleStep(1)
        self.uniquenessRatio.setRange(0, 1000)
        self.speckleWindowSize.setRange(-1, 1000)
        self.speckleRange.setRange(-1, 1000)
        self.disp12MaxDiff.setRange(-1, 1000)
        self.preFilterType.setRange(0, 1)
        self.preFilterType.setSingleStep(1)
        self.preFilterSize.setRange(5, 255)
        self.preFilterSize.setSingleStep(2)
        self.preFilterCap.setRange(1, 63)
        self.preFilterCap.setSingleStep(1)
        self.resolutionBm = QComboBox()
        self.resolutionBm.addItems(["480p", "720p"])
        self.blockMatching = QComboBox()
        self.blockMatching.addItems(
            ["Block Matching", "Semi-Global Block Matching"])
        self.drawEpipolar = QCheckBox()
        self.video0Bm = QLabel()
        self.bmCameraIndexLeft = QComboBox()
        self.bmSwapCameras = QPushButton("Swap lenses")
        self.video1Bm = QLabel()
        self.bmCameraIndexRight = QComboBox()
        self.video_disp = QLabel()
        # 3D point-cloud renderer and its controls
        self.pointCloud = PointCloudGLWidget()
        self.fov = QSpinBox()
        self.fov.setRange(1, 360)
        self.fov.valueChanged.connect(
            self.pointCloud.setFov)
        self.rendererMaxDepth = QSpinBox()
        self.rendererMaxDepth.setRange(1, 5000)
        self.rendererMaxDepth.valueChanged.connect(
            self.pointCloud.setIgnoreDepthLimit)
        self.samplingRatio = QSpinBox()
        self.samplingRatio.setRange(1, 10000)
        self.samplingRatio.setSingleStep(50)
        self.samplingRatio.valueChanged.connect(
            self.pointCloud.setSamplingRatio)
        cameraLayout = QGridLayout()
        displayGroupBox = QGroupBox("Visualisation")
        displayGroupBox.setLayout(cameraLayout)
        pointCloudLayout = QGridLayout()
        pointCloudLayout.addWidget(self.pointCloud, 0, 0, 4, 4)
        pointCloudLayout.addWidget(QLabel("Field of view"), 0, 4, 1, 1)
        pointCloudLayout.addWidget(self.fov, 0, 5, 1, 1)
        pointCloudLayout.addWidget(QLabel("Sampling ratio"), 1, 4, 1, 1)
        pointCloudLayout.addWidget(self.samplingRatio, 1, 5, 1, 1)
        pointCloudLayout.addWidget(QLabel("Ignore depth"), 2, 4, 1, 1)
        pointCloudLayout.addWidget(self.rendererMaxDepth, 2, 5, 1, 1)
        pointCloudControl = QGroupBox("Point cloud")
        pointCloudControl.setLayout(pointCloudLayout)
        cameraLayout.addWidget(self.video0Bm, 0, 0, 4, 4)
        cameraLayout.addWidget(self.bmSwapCameras, 5, 0, 1, 1)
        cameraLayout.addWidget(QLabel("Left camera indices"), 5, 1, 1, 1)
        cameraLayout.addWidget(self.bmCameraIndexLeft, 5, 2, 1, 1)
        cameraLayout.addWidget(self.video1Bm, 0, 3, 4, 4)
        cameraLayout.addWidget(QLabel("Right camera indices"), 5, 3, 1, 1)
        cameraLayout.addWidget(self.bmCameraIndexRight, 5, 4, 1, 1)
        cameraLayout.addWidget(self.video_disp, 6, 0, 4, 4)
        cameraLayout.addWidget(pointCloudControl, 6, 3, 4, 4)
        layout.addWidget(displayGroupBox, 0, 0, 1, 6)
        bmControlLayout = QGridLayout()
        bmControlLayout.addWidget(QLabel("Block matching type"), 0, 0, 1, 1)
        bmControlLayout.addWidget(self.blockMatching, 0, 1, 1, 1)
        bmControlLayout.addWidget(QLabel("Draw epipolar lines"), 0, 2, 1, 1)
        bmControlLayout.addWidget(self.drawEpipolar, 0, 3, 1, 1)
        bmControlLayout.addWidget(QLabel("Resolution"), 0, 4, 1, 1)
        bmControlLayout.addWidget(self.resolutionBm, 0, 5, 1, 1)
        bmControlLayout.addWidget(self.textureThresholdLabel, 1, 0, 1, 1)
        bmControlLayout.addWidget(self.textureThreshold, 1, 1, 1, 1)
        bmControlLayout.addWidget(QLabel("min_disp"),
import itertools
from enum import IntEnum
from typing import Dict, List, Set
from uuid import uuid4

import networkx as nx
import numpy as np
import sm.misc as M
import sm.outputs as O
from grams.algorithm.semantic_graph import (
    SGColumnNode,
    SGEdge,
    SGEntityValueNode,
    SGLiteralValueNode,
    SGNode,
    SGStatementNode,
)
from grams.algorithm.wdont import WDOnt
from grams.inputs.linked_table import LinkedTable
from kgdata.wikidata.models import QNode, WDClass, WDProperty
from rdflib import RDFS
from sm.evaluation import sm_metrics
from sm.misc import identity_func
from sm.misc.graph import viz_graph


# Integer tags for the kinds of nodes a semantic model can contain.
class SMNodeType(IntEnum):
    Column = 0
    Class = 1
    Statement = 2
    Entity = 3
    Literal = 4


class WikidataSemanticModelHelper(WDOnt):
    """Utilities for building, normalizing and minifying
    ``O.SemanticModel`` objects over the Wikidata ontology (via WDOnt)."""

    # the catch-all Wikidata "Entity" class used for untyped subject columns
    ENTITY_ID = "Q35120"
    ENTITY_LABEL = "Entity (Q35120)"
    # properties treated as identifiers (no statement node inserted for them)
    ID_PROPS = {str(RDFS.label)}

    def norm_sm(self, sm: O.SemanticModel):
        """ "Normalize the semantic model with the following modifications:
        1. Add readable label to edge and class
        2. Convert direct link (without statement) to have statement except the id props.
        """
        new_sm = sm.clone()

        # update readable label
        for n in new_sm.iter_nodes():
            if isinstance(n, O.ClassNode):
                if self.is_uri_qnode(n.abs_uri):
                    n.readable_label = self.get_qnode_label(n.abs_uri)
            elif isinstance(n, O.LiteralNode):
                if self.is_uri_qnode(n.value):
                    n.readable_label = self.get_qnode_label(n.value)
        for e in new_sm.iter_edges():
            if e.abs_uri not in self.ID_PROPS:
                e.readable_label = self.get_pnode_label(e.abs_uri)

        # convert direct link
        for edge in list(new_sm.iter_edges()):
            if edge.abs_uri in self.ID_PROPS:
                continue
            source = new_sm.get_node(edge.source)
            target = new_sm.get_node(edge.target)

            if (
                not isinstance(source, O.ClassNode)
                or source.abs_uri != WDOnt.STATEMENT_URI
            ) and (
                not isinstance(target, O.ClassNode)
                or target.abs_uri != WDOnt.STATEMENT_URI
            ):
                # this is direct link, we replace its edge with a
                # source -> statement -> target pair
                assert len(new_sm.get_edges_between_nodes(source.id, target.id)) == 1
                new_sm.remove_edges_between_nodes(source.id, target.id)
                stmt = O.ClassNode(
                    str(uuid4()),
                    WDOnt.STATEMENT_URI,
                    WDOnt.STATEMENT_REL_URI,
                    False,
                    "Statement",
                )
                new_sm.add_node(stmt)
                new_sm.add_edge(
                    O.Edge(
                        source=edge.source,
                        target=stmt.id,
                        abs_uri=edge.abs_uri,
                        rel_uri=edge.rel_uri,
                        approximation=edge.approximation,
                        readable_label=edge.readable_label,
                    )
                )
                new_sm.add_edge(
                    O.Edge(
                        source=stmt.id,
                        target=edge.target,
                        abs_uri=edge.abs_uri,
                        rel_uri=edge.rel_uri,
                        approximation=edge.approximation,
                        readable_label=edge.readable_label,
                    )
                )
        return new_sm

    @staticmethod
    def minify_sm(sm: O.SemanticModel):
        """This is a reverse function of `norm_sm`:
        1. Remove an intermediate statement if it doesn't have any qualifiers
        """
        new_sm = sm.clone()
        for n in sm.iter_nodes():
            if isinstance(n, O.ClassNode) and n.abs_uri == WDOnt.STATEMENT_URI:
                inedges = sm.incoming_edges(n.id)
                outedges = sm.outgoing_edges(n.id)
                if len(outedges) == 1 and outedges[0].abs_uri == inedges[0].abs_uri:
                    # no qualifiers: collapse statement into a direct edge
                    new_sm.remove_node(n.id)
                    for inedge in inedges:
                        assert inedge.abs_uri == outedges[0].abs_uri
                        new_sm.add_edge(
                            O.Edge(
                                inedge.source,
                                outedges[0].target,
                                inedge.abs_uri,
                                inedge.rel_uri,
                                # just in case user misannotate to not include approximation in both links
                                inedge.approximation or outedges[0].approximation,
                                inedge.readable_label,
                            )
                        )
        return new_sm

    def create_sm(self, table: LinkedTable, cpa: nx.MultiDiGraph, cta: Dict[int, str]):
        """Create a semantic model from outputs of CPA and CTA tasks"""
        sm = O.SemanticModel()
        # create class nodes first
        classcount = {}  # occurrences per qnode id, used to make unique node ids
        classmap = {}  # mapping from column to its class node
        for cid, qnode_id in cta.items():
            dnode = O.DataNode(
                id=f"col-{cid}",
                col_index=cid,
                label=table.table.columns[cid].name or "",
            )
            # somehow, they may end-up predict multiple classes, we need to select one
            if qnode_id.find(" ") != -1:
                qnode_id = qnode_id.split(" ")[0]
            curl = self.get_qnode_uri(qnode_id)
            cnode_id = f"{curl}:{classcount.get(qnode_id, 0)}"
            classcount[qnode_id] = classcount.get(qnode_id, 0) + 1
            try:
                cnode_label = self.get_qnode_label(curl)
            except KeyError:
                # unknown qnode: fall back to its relative URI as the label
                cnode_label = f"wd:{qnode_id}"
            cnode = O.ClassNode(
                id=cnode_id,
                abs_uri=curl,
                rel_uri=f"wd:{qnode_id}",
                readable_label=cnode_label,
            )
            classmap[dnode.id] = cnode.id
            sm.add_node(dnode)
            sm.add_node(cnode)
            sm.add_edge(
                O.Edge(
                    source=cnode.id,
                    target=dnode.id,
                    abs_uri=str(RDFS.label),
                    rel_uri="rdfs:label",
                )
            )

        # do a final sweep to add subject columns that are not in CTA
        for uid, unode in cpa.nodes(data="data"):  # type: ignore
            unode: SGNode
            if not isinstance(unode, SGColumnNode):
                continue
            outdegree: int = cpa.out_degree(uid)  # type: ignore
            if outdegree > 0 and not sm.has_node(f"col-{unode.column}"):
                # add data node to the graph and use the entity class (all instances belong to this class) to describe this data node
                dnode = O.DataNode(
                    id=f"col-{unode.column}",
                    col_index=unode.column,
                    label=table.table.columns[unode.column].name or "",
                )
                sm.add_node(dnode)
                curl = self.get_qnode_uri(self.ENTITY_ID)
                cnode_id = f"{curl}:{classcount.get(self.ENTITY_ID, 0)}"
                classcount[self.ENTITY_ID] = classcount.get(self.ENTITY_ID, 0) + 1
                sm.add_node(
                    O.ClassNode(
                        id=cnode_id,
                        abs_uri=curl,
                        rel_uri=f"wd:{self.ENTITY_ID}",
                        readable_label=self.ENTITY_LABEL,
                    )
                )
                classmap[dnode.id] = cnode_id
                sm.add_edge(
                    O.Edge(
                        source=cnode_id,
                        target=dnode.id,
                        abs_uri=str(RDFS.label),
                        rel_uri="rdfs:label",
                    )
                )

        # now add remaining edges and remember to use class node instead of data node
        for uid, vid, edge in cpa.edges(data="data"):  # type: ignore
            edge: SGEdge
            unode: SGNode = cpa.nodes[uid]["data"]
            vnode: SGNode = cpa.nodes[vid]["data"]
            if isinstance(unode, SGColumnNode):
                # outgoing edge is from a class node instead of a data node
                suid = classmap[f"col-{unode.column}"]
                source = sm.get_node(suid)
            elif isinstance(unode, SGEntityValueNode):
                source = O.LiteralNode(
                    id=unode.id,
                    value=self.get_qnode_uri(unode.qnode_id),
                    readable_label=self.get_qnode_label(unode.qnode_id),
                    datatype=O.LiteralNodeDataType.Entity,
                    is_in_context=unode.qnode_id == table.context.page_entity_id,
                )
                sm.add_node(source)
            else:
                assert isinstance(
                    unode, SGStatementNode
                ), "Outgoing edge can't not be from literal"
                # create a statement node
                source = O.ClassNode(
                    id=unode.id,
                    abs_uri=WDOnt.STATEMENT_URI,
                    rel_uri=WDOnt.STATEMENT_REL_URI,
                )
                sm.add_node(source)

            if isinstance(vnode, SGColumnNode):
                svid = f"col-{vnode.column}"
                if svid in classmap:
                    target = sm.get_node(classmap[svid])
                elif sm.has_node(svid):
                    target = sm.get_node(svid)
                else:
                    target = O.DataNode(
                        id=f"col-{vnode.column}",
                        col_index=vnode.column,
                        label=table.table.columns[vnode.column].name or "",
                    )
                    sm.add_node(target)
            elif isinstance(vnode, SGEntityValueNode):
                target = O.LiteralNode(
                    id=vnode.id,
                    value=self.get_qnode_uri(vnode.qnode_id),
                    readable_label=self.get_qnode_label(vnode.qnode_id),
                    datatype=O.LiteralNodeDataType.Entity,
                    is_in_context=vnode.qnode_id == table.context.page_entity_id,
                )
                sm.add_node(target)
            elif isinstance(vnode, SGLiteralValueNode):
                target = O.LiteralNode(
                    id=vnode.id,
                    value=vnode.value.to_string_repr(),
                    readable_label=vnode.label,
                    datatype=O.LiteralNodeDataType.String,
                )
                sm.add_node(target)
            else:
                # create a statement node
                target = O.ClassNode(
                    id=vnode.id,
                    abs_uri=WDOnt.STATEMENT_URI,
                    rel_uri=WDOnt.STATEMENT_REL_URI,
                )
                sm.add_node(target)

            prop_uri = self.get_prop_uri(edge.predicate)
            sm.add_edge(
                O.Edge(
                    source=source.id,
                    target=target.id,
                    abs_uri=self.get_prop_uri(edge.predicate),
                    rel_uri=f"p:{edge.predicate}",
                    readable_label=self.get_pnode_label(prop_uri),
                )
            )
        M.log("grams", semantic_model=sm, cpa=cpa, cta=cta)
        return sm

    def gen_equivalent_sm(
        self,
        sm: O.SemanticModel,
        strict: bool = True,
        force_inversion: bool = False,
        # NOTE(review): Set[str] parameter defaulting to None; replaced
        # with an empty set in the body below.
        incorrect_invertible_props: Set[str] = None,
    ):
        """Given a semantic model (not being modified), generate equivalent models by inferring inverse properties.

        Parameters
        ----------
        sm: the input semantic model (original)
        strict: whether to throw exception when target of an inverse property is not a class.
        force_inversion: only work when strict mode is set to false. Without force_inverse, we skip inverse properties, otherwise, we generate an inverse model with a special class: wikibase:DummyClassForInversion

        Returns
        -------
        """
        """Given an semantic model (not being modified), generate equivalent models by inferring inverse properties.
        Running on strict mode mean it will check if the invertible property is apply to a non-class node (column that .
        Currently, we only inverse the properties, not qualifiers.
        """
        sm = self.norm_sm(sm)
        if incorrect_invertible_props is None:
            incorrect_invertible_props = set()
        invertible_stmts = []
        # a node counts as a "class" target if it is a class node or a
        # literal node whose value is a qnode URI
        is_class_fn = lambda n1: n1.is_class_node or (
            n1.is_literal_node and self.is_uri_qnode(n1.value)
        )

        for n in sm.iter_nodes():
            if n.is_class_node and WDOnt.is_uri_statement(n.abs_uri):
                inedges = sm.incoming_edges(n.id)
                outedges = sm.outgoing_edges(n.id)
                # only has one prop
                (prop,) = list({inedge.abs_uri for inedge in inedges})
                pid = self.get_prop_id(prop)
                stmt_has_value = False
                for outedge in outedges:
                    if outedge.abs_uri != prop:
                        # assert len(self.wdprops[self.get_prop_id(outedge.abs_uri)].inverse_properties) == 0, "Just to make sure" \
                        #     "that qualifiers is not invertable. Otherwise, this algorithm will missing one generated SMs"
                        # character role has an inverse property: performer. They can be used as qualifier so nothing to do here just pass
                        pass
                    else:
                        stmt_has_value = True
                if (
                    len(self.wdprops[pid].inverse_properties) > 0
                    and pid not in incorrect_invertible_props
                    and stmt_has_value
                ):
                    # invertible property
                    # people seem to misunderstand what inverse_property means in RDF;
                    # inverse doesn't apply to data property but only object property.
                    # so we catch the error here to detect what we should fix.
                    (outedge,) = [
                        outedge for outedge in outedges if outedge.abs_uri == prop
                    ]
                    targets_are_class = is_class_fn(sm.get_node(outedge.target))
                    if targets_are_class:
                        invertible_stmts.append(n)
                    elif strict:
                        raise Exception(f"{pid} is not invertible")
                    elif force_inversion:
                        assert sm.get_node(
                            outedge.target
                        ).is_data_node, "Clearly the model is wrong, you have an inverse property to a literal node"
                        invertible_stmts.append(n)

        # we have N statement, so we are going to have N! - 1 ways. It's literally a cartesian product
        all_choices = []
        for stmt in invertible_stmts:
            # assume that each statement only has one incoming link! fix the for loop if this assumption doesn't hold
            (inedge,) = sm.incoming_edges(stmt.id)
            choice = [(stmt, None, None)]
            for invprop in self.wdprops[
                self.get_prop_id(inedge.abs_uri)
            ].inverse_properties:
                choice.append((stmt, self.get_prop_uri(invprop), f"p:{invprop}"))
            all_choices.append(choice)

        n_choices = np.prod([len(c) for c in all_choices]) - 1
        if n_choices > 256:
            raise sm_metrics.PermutationExplosion("Too many possible semantic models")

        all_choices = list(itertools.product(*all_choices))
        assert all(
            invprop is None for _, invprop, _ in all_choices[0]
        ), "First choice is always the current semantic model"
        new_sms = [sm]
        for choice in all_choices[1:]:
            new_sm = sm.clone()
            # we now change the statement from original prop to use the inverse prop (change direction)
            # if the invprop is not None
            for stmt, invprop_abs_uri, invprop_rel_uri in choice:
                if invprop_abs_uri is None:
                    continue
                readable_label = self.get_pnode_label(invprop_abs_uri)
                # assume that each statement only has one incoming link! fix the for loop if this assumption doesn't hold
                (inedge,) = sm.incoming_edges(stmt.id)
                # statement must have only one property
                # NOTE(review): this source chunk is truncated mid-expression below.
                (outedge,) = [
                    outedge
                    for outedge in sm.outgoing_edges(stmt.id)
                    if
# -*- coding: utf-8 -*-
#
# Copyright 2017 Inc
# @author ipqhjjybj
'''
    Upload futures (backtest) data to the report database.
'''
# NOTE(review): this module uses Python 2 syntax (``except Exception,ex`` /
# ``print`` statements) and must be run under Python 2.
import sys
import os

import six
import numpy as np
import pandas as pd

sys.path.append("..")
from config import *
from connect import *
from reportPyConfig import *

# timestamp recorded with every uploaded report row
upload_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

# Commented-out legacy uploader, kept for reference:
# class spreadReportUploader():
#     '''
#         上次详细回测报告
#     '''
#     @staticmethod
#     def uploadSpreadBacktestingAssessmentReport(filePath , strategyName):
#         try:
#         except Exception,ex:
#             print ex


class reportUploader():
    '''
    Delete all trading reports created before a given day.
    (Docstring describes deleteAllReportBeforeDay; the class bundles all
    report-upload helpers as static methods.)
    '''
    @staticmethod
    def deleteAllReportBeforeDay( use_date , clear_table_arr = ["`bascktesting_assessment_report`","`pushon_report`","`core_index_report`","`distribute_best_report`","`niuxiong_state_report`","`other_optimal_parameter_report`","`prom_diff_future_report`","`stability_report`","`strategyreportdict`"]):
        # Delete every row with create_date <= use_date from each table.
        # NOTE(review): mutable default list argument is shared across
        # calls; harmless here since it is only read.
        db = Stock.getDB('mysql')
        for table_name in clear_table_arr:
            sqll = "delete from %s where `create_date` <= '%s'" % (table_name , use_date)
            db.execute(sqll)
            # NOTE(review): mangled original is ambiguous on whether this
            # commit sits inside or after the loop; committing per table
            # is assumed here — confirm against upstream.
            db.commit()

    '''
    上传交易回测报告
    '''
    # Upload a trading backtest assessment report.
    @staticmethod
    def uploadBacktestingAssessmentReport( filePath , strategyName ):
        # 2009/03/27---2017/03/27,2009/03/27---2017/03/27
        try:
            f = open(filePath , "r")
            firstLine = ""
            flag = 0
            ret = []
            i = 0
            for line in f:
                i = i + 1
                line = line.strip()
                if flag == 0 :
                    flag = 1
                    firstLine = line
                    continue
                if i < 4:
                    continue
                # sanitize non-numeric artifacts produced by the backtester
                line = line.replace('1.$', '0.0')
                line = line.replace('-1.#J', '0.0')
                x = line.strip().split(',')
                if '---' in x:
                    continue
                x = x[1]
                if '%' in x:
                    x = x[:-1]
                if '---' in x :
                    # a date range cell: split into start/end dates
                    (withdraw_startdate , withdraw_enddate) = x.strip().split('---')
                    ret.append(withdraw_startdate)
                    ret.append(withdraw_enddate)
                else:
                    ret.append(x)
            f.close()
            # rb,min15,2009/03/27--2016/07/27,3 4 5 1 1 1 1 1 1 1 10 10 10 0 10 10 1 10 1
            (code , period , time_during , setValues) = firstLine.strip().split(',')
            arr = time_during.strip().split('--')
            start_date , end_date = arr
            ret = [strategyName , setValues , code , period , start_date , end_date , upload_date ] + ret
            ret = [str(x) for x in ret]
            fuck = [x for x in ret if '%' in x]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_bascktesting_assessment_report2 + " (NULL," + line + ")"
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    上传组合回测报告
    '''
    # Upload a combination (portfolio) backtest report.
    @staticmethod
    def uploadCombinationAssessmentReport( filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            i = 0
            content_arr = []
            for line in f:
                line = line.strip()
                i = i + 1
                if i == 1:
                    firstLine = line
                if i < 2:
                    continue
                content_arr.append(line)
            f.close()
            contents = '\n'.join(content_arr)
            ret = [strategyName , firstLine , contents]
            ret = [str(x) for x in ret]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_backtesting_combination_report + " (NULL," + line + ")"
            #print sqll
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    上传交易回测报告
    '''
    # Upload a push-on (walk-forward) backtest report.
    @staticmethod
    def uploadPushOnBacktestingReport( filePath , strategyName ):
        # 2009/03/27---2017/03/27,2009/03/27---2017/03/27
        try:
            f = open(filePath , "r")
            firstLine = ""
            flag = 0
            ret = []
            i = 0
            for line in f:
                i = i + 1
                line = line.strip()
                if flag == 0 :
                    flag = 1
                    firstLine = line
                    continue
                if i < 4:
                    continue
                # line = line.replace('-1.#J', '0.0')
                # line = line.replace('1.#QO', '0.0')
                x = line.strip().split(',')
                if '---' in x:
                    continue
                x = x[1]
                if '%' in x:
                    x = x[:-1]
                if '---' in x :
                    (withdraw_startdate , withdraw_enddate) = x.strip().split('---')
                    ret.append(withdraw_startdate)
                    ret.append(withdraw_enddate)
                else:
                    ret.append(x)
            f.close()
            # rb,min15,2009/03/27--2016/07/27,3 4 5 1 1 1 1 1 1 1 10 10 10 0 10 10 1 10 1
            (code , period , time_during , setValues) = firstLine.strip().split(',')
            arr = time_during.strip().split('--')
            start_date , end_date = arr
            ret = [strategyName , setValues , code , period , start_date , end_date , upload_date ] + ret
            ret = [str(x) for x in ret]
            fuck = [x for x in ret if '%' in x]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_bascktesting_assessment_report3 + " (NULL," + line + ")"
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    上传推进报告
    '''
    # Upload a push-forward (rolling window) report.
    @staticmethod
    def uploadPushOnReport( filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            i = 0
            content_arr = []
            for line in f:
                line = line.strip()
                i = i + 1
                if i == 1:
                    firstLine = line
                if i < 4:
                    continue
                # line = line.replace('-1.#J', '0.0')
                # line = line.replace('1.#QO', '0.0')
                content_arr.append(line)
            f.close()
            contents = '\n'.join(content_arr)
            (code , period , time_during , setValues , ybn_months, ybw_months , tuijin_months ) = firstLine.strip().split(',')
            arr = time_during.strip().split('--')
            start_date , end_date = arr
            ret = [strategyName , setValues , code , period , start_date , end_date , upload_date , ybn_months, ybw_months , tuijin_months , contents]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_pushon_report + " (NULL," + line + ")"
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    core_index_Report
    '''
    # Upload a core-index report (in-sample / out-of-sample date ranges).
    @staticmethod
    def uploadCoreIndexReport( filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            i = 0
            content_arr = []
            for line in f:
                i = i + 1
                line = line.strip().decode('utf-8')
                if i == 1:
                    firstLine = line
                    continue
                if i < 4 :
                    continue
                # line = line.replace('-1.#J', '0.0')
                # line = line.replace('1.#QO', '0.0')
                content_arr.append(line)
            f.close()
            (code , period , time_during1 , time_during2 , setValues ) = firstLine.strip().split(',')
            (ybn_start_date , ybn_end_date) = time_during1.split('--')
            (ybw_start_date , ybw_end_date) = time_during2.split('--')
            contents = '\n'.join(content_arr)
            #contents = ""
            ret = [strategyName , setValues , code , period , ybn_start_date , ybn_end_date , ybw_start_date , ybw_end_date , upload_date , contents]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_core_index_report + " (NULL," + line + ")"
            #print sqll
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    distribute_best_report
    '''
    # Upload a best-parameter distribution report.
    @staticmethod
    def uploadDistributeBestReport( filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            i = 0
            content_arr = []
            for line in f:
                i = i + 1
                line = line.strip().decode('utf-8')
                if i == 1:
                    firstLine = line
                    continue
                if i < 3:
                    continue
                # line = line.replace('-1.#J', '0.0')
                # line = line.replace('1.#QO', '0.0')
                content_arr.append(line)
            f.close()
            (code , period , time_during1 , setValues) = firstLine.strip().split(',')
            (start_date , end_date) = time_during1.split('--')
            contents = '\n'.join(content_arr)
            ret = [strategyName , setValues , code , period , start_date , end_date , upload_date , contents]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_distribute_best_report + " (NULL," + line + ")"
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    牛熊阶段报告
    '''
    # Upload a bull/bear phase report.
    @staticmethod
    def uploadNiuxiong_state_report( filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            i = 0
            content_arr = []
            for line in f:
                i = i + 1
                line = line.strip().decode('utf-8')
                if i == 1:
                    firstLine = line
                if i < 3:
                    continue
                content_arr.append(line)
            f.close()
            (code , period , setValues) = firstLine.strip().split(',')
            contents = '\n'.join(content_arr)
            ret = [strategyName , setValues , code , period , upload_date ,contents]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_niuxiong_state_report + " (NULL," + line + ")"
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    其他可选参数报告
    '''
    # Upload an "other optional parameters" report.
    @staticmethod
    def uploadOtherOptiomalParameterReport( filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            i = 0
            content_arr = []
            for line in f:
                i = i + 1
                line = line.strip().decode('utf-8')
                if i == 1:
                    firstLine = line
                if i < 3:
                    continue
                content_arr.append(line)
            f.close()
            (code , period , time_during1 , setValues) = firstLine.strip().split(',')
            (start_date , end_date) = time_during1.split('--')
            contents = '\n'.join(content_arr)
            ret = [strategyName , setValues , code , period , start_date , end_date , upload_date , contents]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_other_optimal_report + " (NULL," + line + ")"
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    prom报告
    '''
    # Upload a PROM (pessimistic return on margin) difference report.
    @staticmethod
    def uploadPromDiffFutureReport(filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            i = 0
            content_arr = []
            for line in f:
                i = i + 1
                line = line.strip().decode('utf-8')
                if i == 1:
                    firstLine = line
                if i < 4:
                    continue
                content_arr.append(line)
            f.close()
            (code , period , time_during1 , setValues) = firstLine.strip().split(',')
            (start_date , end_date) = time_during1.split('--')
            contents = '\n'.join(content_arr)
            ret = [strategyName , setValues , code , period , start_date , end_date , upload_date , contents]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_prom_diff_future_report + " (NULL," + line + ")"
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    上次交易交易收益报告
    '''
    # Upload the trading-result (profit figure) report.
    @staticmethod
    def uploadTradingResultFigure(filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            flag = 0
            ret = []
            i = 0
            content_arr = []
            for line in f:
                i = i + 1
                line = line.strip()
                if flag == 0:
                    flag = 1
                    firstLine = line
                    continue
                if i < 2:
                    continue
                content_arr.append(line)
            f.close()
            try:
                (code , period , time_during1 , setValues) = firstLine.strip().split(',')
                (start_date , end_date) = time_during1.split('--')
            except Exception,ex:
                # header is optional: fall back to empty metadata fields
                (code , period , time_during1 , setValues) = ["","","",""]
                (start_date , end_date) = ["",""]
            contents = '\n'.join(content_arr)
            ret = [strategyName , setValues , code , period , start_date , end_date , upload_date , contents]
            ret = ["'" + x + "'" for x in ret]
            line = ','.join(ret)
            sqll = config_upload_best_figure + " (NULL," + line + ")"
            db = Stock.getDB('mysql')
            db.execute(sqll)
            db.commit()
            n_id = db.lastRowID()
            return n_id
        except Exception,ex:
            print ex
            print "Error in %s %s " % (filePath , strategyName)
            return 0

    '''
    stability report
    '''
    # Upload a stability report.
    # NOTE(review): this source chunk is truncated mid-statement below.
    @staticmethod
    def uploadStability_report(filePath , strategyName):
        try:
            f = open(filePath , "r")
            firstLine = ""
            i = 0
            content_arr = []
            for line in f:
                i = i + 1
                line = line.strip().decode('utf-8')
                if i == 1:
                    firstLine = line
                if i
<gh_stars>0 #!/usr/bin/python # -*- coding: utf-8 -*- # Software License Agreement (BSD License) # # Copyright (c) 2009-2011, Eucalyptus Systems, Inc. # All rights reserved. # # Redistribution and use of this software in source and binary forms, with or # without modification, are permitted provided that the following conditions # are met: # # Redistributions of source code must retain the above # copyright notice, this list of conditions and the # following disclaimer. # # Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other # materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Author: <EMAIL> __version__ = '0.0.10' import re import os import random import time import string import socket import sys import traceback import StringIO import eulogger import hashlib import types import operator import fcntl import struct import subprocess import termios from functools import wraps class TimeoutFunctionException(Exception): """Exception to raise on a timeout""" pass class Eutester(object): # Force ansi escape sequences (markup) in output. 
# This can also be set as an env var _EUTESTER_FORCE_ANSI_ESCAPE = False # Allow ansi color codes outside the standard range. For example some systems support # a high intensity color range from 90-109. # This can also be set as an env var _EUTESTER_NON_STANDARD_ANSI_SUPPORT = False def __init__(self, credpath=None): """This class is intended to setup boto connections for the various services that the *ops classes will use. :param credpath: Path to a valid eucarc file. :param aws_access_key_id: Used in conjuction with aws_secret_access_key allows for creation of connections without needing a credpath. :param aws_secret_access_key: Used in conjuction with aws_access_key_id allows for creation of connections without needing a credpath. :rtype: :class:`eutester.Eutester` or ``None`` :returns: A Eutester object with all connections that were able to be created. Currently EC2, S3, IAM, and STS. """ ### Default values for configuration self.credpath = credpath ### Eutester logs self.logger = eulogger.Eulogger(identifier="EUTESTER") self.debug = self.logger.log.debug self.critical = self.logger.log.critical self.info = self.logger.log.info ### LOGS to keep for printing later self.fail_log = [] self.running_log = self.logger.log ### Pull the access and secret keys from the eucarc or use the ones provided to the constructor if self.credpath is not None: self.debug("Extracting keys from " + self.credpath) self.aws_access_key_id = self.get_access_key() self.aws_secret_access_key = self.get_secret_key() self.account_id = self.get_account_id() self.user_id = self.get_user_id() @property def ec2_certpath(self): try: return self.parse_eucarc('EC2_CERT') except ValueError: return None @property def ec2_cert(self): certpath = self.ec2_certpath if certpath and os.path.exists(certpath): out = self.local('cat {0}'.format(certpath)) return str("\n".join(out)).strip() return None @property def ec2_private_key_path(self): try: return self.parse_eucarc('EC2_PRIVATE_KEY') except 
ValueError: return None @property def ec2_private_key(self): keypath = self.ec2_private_key_path if keypath and os.path.exists(keypath): out = self.local('cat {0}'.format(keypath)) return "\n".join(out) return None def get_access_key(self): if not self.aws_access_key_id: """Parse the eucarc for the EC2_ACCESS_KEY""" self.aws_access_key_id = self.parse_eucarc("EC2_ACCESS_KEY") return self.aws_access_key_id def get_secret_key(self): if not self.aws_secret_access_key: """Parse the eucarc for the EC2_SECRET_KEY""" self.aws_secret_access_key = self.parse_eucarc("EC2_SECRET_KEY") return self.aws_secret_access_key def get_account_id(self): if not self.account_id: """Parse the eucarc for the EC2_ACCOUNT_NUMBER""" self.account_id = self.parse_eucarc("EC2_ACCOUNT_NUMBER") return self.account_id def get_user_id(self): if not self.user_id: self.user_id = self.parse_eucarc("EC2_USER_ID") """Parse the eucarc for the EC2_ACCOUNT_NUMBER""" return self.user_id def get_port(self): """Parse the eucarc for the EC2_ACCOUNT_NUMBER""" ec2_url = self.parse_eucarc("EC2_URL") return ec2_url.split(':')[1].split("/")[0] def parse_eucarc(self, field): if self.credpath is None: raise RuntimeError('Credpath has not been set yet. ' 'Please set credpath or provide ' 'configuration file') cmd = 'bash -c \'source {0}/eucarc &> /dev/null && echo ${1}\''.format(self.credpath, field) out = self.local(cmd) if out[0]: return out[0] else: if out: out = "\n".join(out) self.critical('Failed to find field: {0},\nCommand:{1}\nReturned:\n"{2}"' .format(field, cmd, out)) try: catcmd = 'cat {0}/eucarc | grep {1}'.format(self.credpath, field) catout = self.local(catcmd) catout = "\n".join(catout) except Exception, ce: catout = "Command failed:{0}, err:{1}".format(catcmd, ce) self.critical("Unable to find {0} id in eucarc. 
{1}:\n{2}\n" .format(field, catcmd, catout)) raise ValueError("Unable to find " + field + " id in eucarc") def handle_timeout(self, signum, frame): raise TimeoutFunctionException() def local(self, cmd, shell=True): """ Run a command on the localhost :param cmd: str representing the command to be run :return: :raise: CalledProcessError on non-zero return code """ args = cmd process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=4096, shell=shell) output, unused_err = process.communicate() retcode = process.poll() if retcode: self.debug('CMD:"{0}"\nOUTPUT:\n{1}'.format(cmd, str(output))) error = subprocess.CalledProcessError(retcode, cmd) error.output = output raise error return output.split("\n") def found(self, command, regex): """ Returns a Boolean of whether the result of the command contains the regex """ result = self.local(command) for line in result: found = re.search(regex,line) if found: return True return False def ping(self, address, poll_count = 10): """ Ping an IP and poll_count times (Default = 10) address Hostname to ping poll_count The amount of times to try to ping the hostname iwth 2 second gaps in between """ if re.search("0.0.0.0", address): self.critical("Address is all 0s and will not be able to ping it") return False self.debug("Attempting to ping " + address) for x in xrange(0, poll_count): try: self.local("ping -c 1 " + address) self.debug("Was able to ping address") return True except subprocess.CalledProcessError as CPE: self.debug('Output:' + str(CPE.output)) self.debug('Ping attempt {0}/{1} failed, err:{2}' .format(x, poll_count, str(CPE))) self.sleep(2) self.critical("Was unable to ping address") return False @classmethod def markup(cls, text, markups=[1], resetvalue="\033[0m", force=None, allow_nonstandard=None): """ Convenience method for using ansi markup. Attempts to check if terminal supports ansi escape sequences for text markups. 
If so will return a marked up version of the text supplied using the markups provided. Some example markeups: 1 = bold, 4 = underline, 94 = blue or markups=[1, 4, 94] :param text: string/buffer to be marked up :param markups: a value or list of values representing ansi codes. :param resetvalue: string used to reset the terminal, default: "\33[0m" :param force: boolean, if set will add escape sequences regardless of tty. Defaults to the class attr '_EUTESTER_FORCE_ANSI_ESCAPE' or the env variable: 'EUTESTER_FORCE_ANSI_ESCAPE' if it is set. :param allow_nonstandard: boolean, if True all markup values will be used. If false the method will attempt to remap the markup value to a standard ansi value to support tools such as Jenkins, etc. Defaults to the class attr '._EUTESTER_NON_STANDARD_ANSI_SUPPORT' or the environment variable 'EUTESTER_NON_STANDARD_ANSI_SUPPORT' if set. returns a string with the provided 'text' formatted within ansi escape sequences """ if not isinstance(markups, list): markups = [markups] if force is None: force = os.environ.get('EUTESTER_FORCE_ANSI_ESCAPE', cls._EUTESTER_FORCE_ANSI_ESCAPE) if str(force).upper() == 'TRUE': force = True else: force = False if allow_nonstandard is None: allow_nonstandard = os.environ.get('EUTESTER_NON_STANDARD_ANSI_SUPPORT', cls._EUTESTER_NON_STANDARD_ANSI_SUPPORT) if str(allow_nonstandard).upper() == 'TRUE': allow_nonstandard = True else: allow_nonstandard = False if not force: if not (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()): return text if not allow_nonstandard: newmarkups = [] for markup in markups: if markup > 90: newmarkups.append(markup-60) else: newmarkups.append(markup) markups = newmarkups lines = [] markupvalues=";".join(str(x) for x in markups) for line in text.splitlines(): lines.append("\033[{0}m{1}\033[0m".format(markupvalues, line)) buf = "\n".join(lines) if text.endswith('\n') and not buf.endswith('\n'): buf += '\n' return buf def scan_port_range(self, ip, start, stop, timeout=1, 
tcp=True): ''' Attempts to connect to ports, returns list of ports which accepted a connection ''' ret = [] for x in xrange(start,stop+1): try: sys.stdout.write("\r\x1b[K"+str('scanning:'+str(x))) sys.stdout.flush() self.test_port_status(ip, x, timeout=timeout,tcp=tcp, verbose=False) ret.append(x) except socket.error, se: pass return ret def test_port_status(self, ip, port, timeout=5, tcp=True, recv_size=0, send_buf=None, verbose=True): ''' Attempts to connect to tcp port at ip:port within timeout seconds ''' ret_buf = "" if verbose: debug = self.debug else: debug = lambda msg: None debug('test_port_status, ip:'+str(ip)+', port:'+str(port)+', TCP:'+str(tcp)) if tcp: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) else:
<reponame>ambarqadeer/ALOPqadeer """ VERSION 4 Feb 2017 Compatible with python3 and Numpy 1.12 - <NAME> """ from astropy.io import fits as pyfits import math import numpy import sys import scipy.ndimage import re def regextract(filename, comments=False): """ Converts ds9 region files to become usable by the aper function. INPUTS: filename -- input ds9 regions file array. The ds9 file must be saved in physical coordinates. In DS9: Region->Save Regions [Choose destination/filename.reg and press OK] Format=ds9 Coordinate System=physical [OK] OPTIONAL INPUTS: comments -- if comments=True then all circles must have comments. (Default = False) OUTPUTS: The output is an array of strings containing the values as shown below. This is done to enable the use of string names in comments. Even when comments are turned off, the format is kept to keep the format consistent. The format is 3xn if comments=False and 4xn if comments=True Array -- ['x','y','radius','comment'] EXAMPLE: Convert the following region file into python format reg.ds9 contains: ================ # Region file format: DS9 version 4.1 global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1 physical circle(2763.4747,3175.7129,29.0882) # text={1} circle(2860.7076,3094.7166,25.0868) # text={text} ================ Then calling: regions = regextract('reg.ds9', comment=True) regions then gives: array([['2763.4747','3175.7129','29.0882', '1'], ['860.7076','3094.7166','25.0868', 'text'],], dtype='|S32') If the array does not contain text, setting it to be a float array is done by simply saying array.dtype = float REVISION HISTORY: Created by <NAME> 22 Apr 2015 Changed to give an array of floats - <NAME> 6 May 2015 Made compatible with numpy 1.12 - <NAME> February 2017 """ array = numpy.array([]) array2 = numpy.array([]) regions = numpy.genfromtxt(filename, skip_header=3, comments='@', delimiter='\n' ,dtype='str') 
#print(regions) for line in regions: #for line in regions.split("\n"): array = numpy.append(array, numpy.array([str(x) for x in re.findall(r"\d+(?:\.\d+)?(?=[^()\n]*\))", line)])) if comments == True: array2 = numpy.array([str(x) for x in re.findall(r"(?<=\{)[^}]+(?=\})", line)]) array = numpy.append(array, array2) if comments == True: array = array.reshape(len(array)/4,4) x = array[:,0].astype(numpy.float) y = array[:,1].astype(numpy.float) r = array[:,2].astype(numpy.float) comments = array[:,3] return x,y,r,comments else: array = array.reshape(int(len(array)/3),3).astype(numpy.float) x = array[:,0] y = array[:,1] r = array[:,2] return x,y,r def meanclip(image,mean,sigma,clipsig=3.,maxiter=5.,converge_num=0.02,verbose=False): """ NAME: MEANCLIP PURPOSE: Computes an iteratively sigma-clipped mean on a data set EXPLANATION: Clipping is done about median, but mean is returned. CATEGORY: Statistics CALLING SEQUENCE: [mean,sigma]=MEANCLIP( image,mean,sigma, SUBS= CLIPSIG=, MAXITER=, CONVERGE_NUM=, VERBOSE=False, DOUBLE=False ) INPUT POSITIONAL PARAMETERS: image: Input data, any numeric array OUTPUT POSITIONAL PARAMETERS: Mean: N-sigma clipped mean. Sigma: Standard deviation of remaining pixels. INPUT KEYWORD PARAMETERS: CLIPSIG=3: Number of sigma at which to clip. Default=3 MAXITER=5: Ceiling on number of clipping iterations. Default=5 CONVERGE_NUM=0.02: If the proportion of rejected pixels is less than this fraction, the iterations stop. Default=0.02, i.e., iteration stops if fewer than 2% of pixels excluded. VERBOSE=False: Set this flag to get messages. DOUBLE=False - if set then perform all computations in double precision. Otherwise double precision is used only if the input data is double OUTPUT KEYWORD PARAMETER: SUBS: Subscript array for pixels finally used. [not functional] MODIFICATION HISTORY: Written by: RSH, RITSS, 21 Oct 98 20 Jan 99 - Added SUBS, fixed misplaced paren on float call, improved doc. 
RSH Nov 2005 Added /DOUBLE keyword, check if all pixels are removed by clipping <NAME> Nov 2012 converted to python G.P.P.L. Otten Feb 2015 removed by reference last=ct G.P.P.L. Otten """ image=numpy.ravel(image) imagenumbers=numpy.arange(numpy.size(image)) subs=imagenumbers[numpy.isfinite(image)] ct=numpy.sum(numpy.isfinite(image)) iternr=0 for iternr2 in numpy.arange(maxiter+1): #while iternr <= maxiter: skpix=image[subs] #print numpy.sum(skpix) #print numpy.size(skpix) iternr=iternr+1 lastct=ct*1. medval=numpy.median(skpix) mean=numpy.mean(skpix,dtype=numpy.float64) sig=numpy.std(skpix,ddof=1,dtype=numpy.float64) wsm = (abs(skpix-medval) < clipsig*sig) ct=numpy.sum(wsm) if ct > 0: subs=subs[wsm] #print iternr if (iternr > maxiter) | (ct == 0) | (((abs(ct-lastct))/lastct) <= converge_num): break skpix=image[subs] mean=numpy.mean(skpix,dtype=numpy.float64) sig=numpy.std(skpix,ddof=1,dtype=numpy.float64) return [mean,sig] def aper(image,xc,yc,phpadu,apr,skyrad,badpix=[0,0],prnt=False, silent=False, flux=False, exact = False, nan = False, setskyval = [], readnoise = [], meanback = False, clipsig=3., maxiter=5.,converge_num=0.02, minsky = 20.): """Performs aperture photometry on stars INPUTS: image -- input image array xc -- vector of x coordinates. yc -- vector of y coordinates. phpadu -- Photons per Analog Digital Units, numeric scalar. Converts the data numbers in IMAGE to photon units. (APER assumes Poisson statistics.) COMMENT BY GILLES: phpadu seems to do very little and only scales the error on the flux apr -- Vector of up to 12 REAL photometry aperture radii. skyrad -- Two element vector giving the inner and outer radii to be used for the sky annulus. Ignored if the SETSKYVAL keyword is set. badpix -- Two element vector giving the minimum and maximum value of a good pixel. If badpix is not supplied or if BADPIX[0] is equal to BADPIX[1] then it is assumed that there are no bad pixels. 
Note that fluxes will not be computed for any star with a bad pixel within the aperture area, but that bad pixels will be simply ignored for the sky computation. The BADPIX parameter is ignored if the /NAN keyword is set. OPTIONAL INPUTS: clipsig -- if meanback == True, then this is the number of sigma at which to clip the background. (default=3) converge_num -- if meanback == True then if the proportion of rejected pixels is less than this fraction, the iterations stop. (default=0.02, i.e., iteration stops if fewer than 2% of pixels excluded.) exact -- By default, APER counts subpixels, but uses a polygon approximation for the intersection of a circular aperture with a square pixel (and normalizes the total area of the sum of the pixels to exactly match the circular area). If the /EXACT keyword, then the intersection of the circular aperture with a square pixel is computed exactly. The /EXACT keyword is much slower and is only needed when small (~2 pixels) apertures are used with very undersampled data. (default = False) flux -- By default, APER uses a magnitude system where a magnitude of 25 corresponds to 1 flux unit. If set, then APER will keep results in flux units instead of magnitudes. (default = False) maxiter -- if meanback == True then this is the ceiling on number of clipping iterations of the background. (default=5) meanback -- if set, then the background is computed using the 3 sigma clipped mean (using meanclip.pro) rather than using the mode computed with mmm.pro. This keyword is useful for the Poisson count regime or where contamination is known to be minimal. (default False) minsky -- Integer giving mininum number of sky values to be used with MMM APER will not compute a flux if fewer valid sky elements are within the sky annulus. (default = 20) nan -- If set then APER will check for NAN values in the image. /NAN takes precedence over the BADPIX parameter. 
Note that fluxes will not be computed for any star with a NAN pixel within the aperture area, but that NAN pixels will be simply ignored for the sky computation. (default = False) prnt -- if set and non-zero then APER will also write its results to a file aper.prt. One can specify the output file name by setting PRNT = 'filename'. (default = False) [DOES NOT FUNCTION - Gilles] readnoise -- Scalar giving the read noise (or minimum noise for any pixel. This value is passed to the procedure mmm.pro when computing the sky, and is only need for images where the noise is low, and pixel values are quantized. silent - If supplied and non-zero then no output is displayed to the terminal. (default = False) setskyval -- Use this keyword to force the sky to a specified value rather than have APER compute a sky value. SETSKYVAL can either be a scalar specifying the sky value to use for all sources, or a 3 element vector specifying the sky value, the sigma of the sky value, and the number of elements used to compute a sky value. The 3 element form of SETSKYVAL is needed for accurate error budgeting. OUTPUTS: mags - NAPER by NSTAR array giving the magnitude for each star in each aperture. (NAPER is the number of apertures, and NSTAR is the number of stars). If flux == False, then a
= "accounting-probes", type = OPTTYPE_STRING, required = False, doc = """ Accounting probes. There are four built-in probes: - AR: Collects information on AR leases. - best-effort: Collects information on best effort leases. - immediate: Collects information immediate leases. - utilization: Collects information on resource utilization. See the Haizea documentation for details on how to write your own accounting probes. """), Option(name = "attributes", getter = "attributes", type = OPTTYPE_STRING, required = False, doc = """ This option is used internally by Haizea when using multiconfiguration files. See the multiconfiguration documentation for more details. """) ] sections.append(accounting) # ============================= # # # # DEPLOYMENT OPTIONS # # (w/ image transfers) # # # # ============================= # imgtransfer = Section("deploy-imagetransfer", required=False, required_if = [(("general","lease-deployment"),"imagetransfer")], doc = """ When lease deployment with disk image transfers is selected, this section is used to control image deployment parameters.""") imgtransfer.options = \ [ Option(name = "transfer-mechanism", getter = "transfer-mechanism", type = OPTTYPE_STRING, required = True, valid = [constants.TRANSFER_UNICAST, constants.TRANSFER_MULTICAST], doc = """ Specifies how disk images are transferred. Valid values are: - unicast: A disk image can be transferred to just one node at a time - multicast: A disk image can be multicast to multiple nodes at the same time. """), Option(name = "avoid-redundant-transfers", getter = "avoid-redundant-transfers", type = OPTTYPE_BOOLEAN, required = False, default = True, doc = """ Specifies whether the scheduler should take steps to detect and avoid redundant transfers (e.g., if two leases are scheduled on the same node, and they both require the same disk image, don't transfer the image twice; allow one to "piggyback" on the other). There is generally no reason to set this option to False. 
"""), Option(name = "force-imagetransfer-time", getter = "force-imagetransfer-time", type = OPTTYPE_TIMEDELTA, required = False, doc = """ Forces the image transfer time to a specific amount. This options is intended for testing purposes. """), Option(name = "diskimage-reuse", getter = "diskimage-reuse", type = OPTTYPE_STRING, required = False, required_if = None, default = constants.REUSE_NONE, valid = [constants.REUSE_NONE, constants.REUSE_IMAGECACHES], doc = """ Specifies whether disk image caches should be created on the nodes, so the scheduler can reduce the number of transfers by reusing images. Valid values are: - none: No image reuse - image-caches: Use image caching algorithm described in Haizea publications """), Option(name = "diskimage-cache-size", getter = "diskimage-cache-size", type = OPTTYPE_INT, required = False, required_if = [(("deploy-imagetransfer","diskimage-reuse"),True)], doc = """ Specifies the size (in MB) of the disk image cache on each physical node. """) ] sections.append(imgtransfer) # ============================= # # # # TRACEFILE OPTIONS # # # # ============================= # tracefile = Section("tracefile", required=False, doc=""" When reading in requests from a tracefile, this section is used to specify the tracefile and other parameters.""") tracefile.options = \ [ Option(name = "tracefile", getter = "tracefile", type = OPTTYPE_STRING, required = True, doc = """ Path to tracefile to use. """), Option(name = "imagefile", getter = "imagefile", type = OPTTYPE_STRING, required = False, doc = """ Path to list of images to append to lease requests. If omitted, the images in the tracefile are used. """), Option(name = "injectionfile", getter = "injectionfile", type = OPTTYPE_STRING, required = False, doc = """ Path to file with leases to "inject" into the tracefile. 
"""), Option(name = "runtime-slowdown-overhead", getter = "runtime-slowdown-overhead", type = OPTTYPE_FLOAT, required = False, default = 0, doc = """ Adds a runtime overhead (in %) to the lease duration. """), Option(name = "add-overhead", getter = "add-overhead", type = OPTTYPE_STRING, required = False, default = constants.RUNTIMEOVERHEAD_NONE, valid = [constants.RUNTIMEOVERHEAD_NONE, constants.RUNTIMEOVERHEAD_ALL, constants.RUNTIMEOVERHEAD_BE], doc = """ Specifies what leases will have a runtime overhead added: - none: No runtime overhead must be added. - besteffort: Add only to best-effort leases - all: Add runtime overhead to all leases """), Option(name = "bootshutdown-overhead", getter = "bootshutdown-overhead", type = OPTTYPE_TIMEDELTA, required = False, default = TimeDelta(seconds=0), doc = """ Specifies how many seconds will be alloted to boot and shutdown of the lease. """), Option(name = "override-memory", getter = "override-memory", type = OPTTYPE_INT, required = False, default = constants.NO_MEMORY_OVERRIDE, doc = """ Overrides memory requirements specified in tracefile. """), ] sections.append(tracefile) # ============================= # # # # OPENNEBULA OPTIONS # # # # ============================= # opennebula = Section("opennebula", required=False, required_if = [(("general","mode"),"opennebula")], doc = """ This section is used to specify OpenNebula parameters, necessary when using Haizea as an OpenNebula scheduling backend.""") opennebula.options = \ [ Option(name = "host", getter = "one.host", type = OPTTYPE_STRING, required = True, doc = """ Host where OpenNebula is running. Typically, OpenNebula and Haizea will be installed on the same host, so the following option should be set to 'localhost'. If they're on different hosts, make sure you modify this option accordingly. 
"""), Option(name = "port", getter = "one.port", type = OPTTYPE_INT, required = False, default = defaults.OPENNEBULA_RPC_PORT, doc = """ TCP port of OpenNebula's XML RPC server """), Option(name = "stop-when-no-more-leases", getter = "stop-when-no-more-leases", type = OPTTYPE_BOOLEAN, required = False, default = False, doc = """ This option is useful for testing and running experiments. If set to True, Haizea will stop when there are no more leases to process (which allows you to tun Haizea and OpenNebula unattended, and count on it stopping when there are no more leases to process). For now, this only makes sense if you're seeding Haizea with requests from the start (otherwise, it will start and immediately stop). """), Option(name = "dry-run", getter = "dry-run", type = OPTTYPE_BOOLEAN, required = False, default = False, doc = """ This option is useful for testing. If set to True, Haizea will fast-forward through time (note that this is different that using the simulated clock, which has to be used with a tracefile; with an Haizea/OpenNebula dry run, you will have to seed OpenNebula with requests before starting Haizea). You will generally want to set stop-when-no-more-leases when doing a dry-run. IMPORTANT: Haizea will still send out enactment commands to OpenNebula. Make sure you replace onevm with a dummy command that does nothing (or that reacts in some way you want to test; e.g., by emulating a deployment failure, etc.) 
"""), ] sections.append(opennebula) def __init__(self, config): Config.__init__(self, config, self.sections) self.attrs = {} if self._options["attributes"] != None: attrs = self._options["attributes"].split(",") for attr in attrs: (k,v) = attr.split("=") self.attrs[k] = v def get_attr(self, attr): return self.attrs[attr] def get_attrs(self): return self.attrs.keys() class HaizeaMultiConfig(Config): MULTI_SEC = "multi" COMMON_SEC = "common" TRACEDIR_OPT = "tracedir" TRACEFILES_OPT = "tracefiles" INJDIR_OPT = "injectiondir" INJFILES_OPT = "injectionfiles" DATADIR_OPT = "datadir" def __init__(self, config): # TODO: Define "multi" section as a Section object Config.__init__(self, config, []) def get_profiles(self): sections = set([s.split(":")[0] for s in self.config.sections()]) # Remove multi and common sections sections.difference_update([self.COMMON_SEC, self.MULTI_SEC]) return list(sections) def get_trace_files(self): dir = self.config.get(self.MULTI_SEC, self.TRACEDIR_OPT) traces = self.config.get(self.MULTI_SEC, self.TRACEFILES_OPT).split() return [dir + "/" + t for t in traces] def get_inject_files(self): dir = self.config.get(self.MULTI_SEC, self.INJDIR_OPT) inj = self.config.get(self.MULTI_SEC, self.INJFILES_OPT).split() inj = [dir + "/" + i for i in inj] inj.append(None) return inj def get_configs(self): profiles = self.get_profiles() tracefiles = self.get_trace_files() injectfiles = self.get_inject_files() configs = [] for profile in profiles: for tracefile in tracefiles: for injectfile in injectfiles: profileconfig = ConfigParser.ConfigParser() commonsections = [s for s in self.config.sections() if s.startswith("common:")] profilesections = [s for s in self.config.sections() if s.startswith(profile +":")] sections = commonsections + profilesections for s in sections: s_noprefix = s.split(":")[1] items = self.config.items(s) if not profileconfig.has_section(s_noprefix): profileconfig.add_section(s_noprefix) for item in items: profileconfig.set(s_noprefix, 
item[0], item[1]) # The tracefile section may have not been created if not profileconfig.has_section("tracefile"): profileconfig.add_section("tracefile") # Add tracefile option profileconfig.set("tracefile", "tracefile",
<reponame>oxu2/flyingsquid
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
from flyingsquid import _triplets
from flyingsquid import _graphs
from flyingsquid import _observables
from flyingsquid import _lm_parameters
import numpy as np
import math
from tqdm import tqdm
import sys
import random


class LabelModel(_triplets.Mixin, _graphs.Mixin, _observables.Mixin,
                 _lm_parameters.Mixin):
    # Triplet-method label model over m labeling functions (LF's) and v tasks.
    # Graph structure lives in a pgmpy MarkovModel; the mixins supply the
    # triplet / graph / observable / parameter machinery.

    def __init__(self, m, v=1, y_edges=[], lambda_y_edges=[], lambda_edges=[],
                 allow_abstentions=True, triplets=None, triplet_seed=0):
        '''Initialize the LabelModel with a graph G.

        m: number of LF's
        v: number of Y tasks
        y_edges: edges between the tasks. (i, j) in y_edges means that
            there is an edge between y_i and y_j.
        lambda_y_edges: edges between LF's and tasks. (i, j) in lambda_y_edges
            means that there is an edge between lambda_i and y_j. If this
            list is empty, assume that all labeling functions are connected
            to Y_0.
        lambda_edges: edges between LF's. (i, j) in lambda_edges means that
            there is an edge between lambda_i and lambda_j.
        allow_abstentions: if True, allow abstentions in L_train.
        triplets: if specified, use these triplets
        triplet_seed: if triplets not specified, randomly shuffle the nodes
            with this seed when generating triplets
        '''
        # NOTE(review): mutable default arguments (y_edges=[], ...) are shared
        # across calls; safe only while they are never mutated — the
        # `lambda_y_edges == []` branch below rebinds rather than mutates.
        if lambda_y_edges == []:
            lambda_y_edges = [(i, 0) for i in range(m)]

        G = MarkovModel()
        # Add LF nodes
        G.add_nodes_from([
            'lambda_{}'.format(i)
            for i in range(m)
        ])
        G.add_nodes_from([
            'Y_{}'.format(i)
            for i in range(v)
        ])

        # Add edges
        G.add_edges_from([
            ('Y_{}'.format(start), 'Y_{}'.format(end))
            for start, end in y_edges
        ])
        G.add_edges_from([
            ('lambda_{}'.format(start), 'Y_{}'.format(end))
            for start, end in lambda_y_edges
        ])
        G.add_edges_from([
            ('lambda_{}'.format(start), 'lambda_{}'.format(end))
            for start, end in lambda_edges
        ])

        # No LF-LF edges means every LF is conditionally independent.
        self.fully_independent_case = lambda_edges == []

        self.m = m
        if m < 3:
            raise NotImplementedError("Triplet method needs at least three LF's to run.")
        self.v = v
        self.G = G
        self.junction_tree = self.G.to_junction_tree()

        self.nodes = sorted(list(self.G.nodes))
        self.triplet_seed = triplet_seed
        # Shuffling node order changes which triplets get generated later.
        if triplet_seed is not None:
            random.seed(triplet_seed)
            random.shuffle(self.nodes)

        # Separator sets of the junction tree: intersections of adjacent cliques.
        self.separator_sets = set([
            tuple(sorted(list((set(clique1).intersection(set(clique2))))))
            for clique1, clique2 in self.junction_tree.edges
        ])

        self.allow_abstentions = allow_abstentions
        self.triplets = triplets

        # _check is supplied by a mixin; rejects graphs the triplet method
        # cannot handle.
        if not self._check():
            raise NotImplementedError('Cannot run triplet method for specified graph.')

    # Make this Picklable
    # NOTE(review): `save` and `load` take `obj`/`cls` with no self parameter
    # and no @classmethod decorator — presumably registered as
    # __reduce__-style pickle helpers elsewhere; confirm intended usage.
    def save(obj):
        return (obj.__class__, obj.__dict__)

    def load(cls, attributes):
        obj = cls.__new__(cls)
        obj.__dict__.update(attributes)
        return obj

    def enumerate_ys(self):
        # order to output probabilities
        # Every {-1, +1} assignment to the v tasks, in sorted order.
        vals = { Y: (-1, 1) for Y in range(self.v) }
        Y_vecs = sorted([
            [ vec_dict[Y] for Y in range(self.v) ]
            for vec_dict in dict_product(vals)
        ])
        return Y_vecs

    def _lambda_pass(self, L_train, lambda_marginals, lambda_moment_vals,
                     lambda_equals_one, lambda_zeros, abstention_probabilities,
                     verbose = False):
        '''Make the pass over L_train.

        In this pass, we need to:
        * Compute all the joint marginal distributions over multiple
          lambda's (lambda_marginals)
        * Compute the probabilities that some set of lambda's are all equal
          to zero (lambda_zeros)
        * Compute all the lambda moments, including conditional moments
          (lambda_moment_vals)
        * Compute the probability that the product of some lambdas is zero
          (abstention_probabilities)
        '''
        # do the fast cases first
        # Single-LF statistics can be computed vectorized from column counts,
        # without looping over L_train row by row.
        easy_marginals = {
            marginal: None
            for marginal in lambda_marginals
            if len(marginal) == 1
        }
        easy_moments = {
            moment: None
            for moment in lambda_moment_vals
            if type(moment[0]) != type(()) and len(moment) <= 2
        }
        easy_equals_one = {
            factor: None
            for factor in lambda_equals_one
            if type(factor[0]) != type(()) and len(factor) == 1
        }
        easy_zeros = {
            condition: None
            for condition in lambda_zeros
            if len(condition) == 1
        }
        easy_abstention_probs = {
            factor: None
            for factor in abstention_probabilities
            if len(factor) == 1
        }

        # Per-LF means and the full second-moment matrix in two einsums.
        means = np.einsum('ij->j', L_train)/L_train.shape[0]
        covariance = np.einsum('ij,ik->jk', L_train, L_train)/L_train.shape[0]

        lf_cardinality = 3 if self.allow_abstentions else 2
        lf_values = (-1, 0, 1) if self.allow_abstentions else (-1, 1)
        for marginal in easy_marginals:
            idx = marginal[0]
            counts = [ np.sum(L_train[:,idx] == val) / L_train.shape[0] for val in lf_values ]
            easy_marginals[marginal] = JointProbabilityDistribution(
                [ 'lambda_{}'.format(idx) ], [ lf_cardinality ], counts
            )
            # Reuse the counts for the other single-LF statistics:
            # counts[-1] is P(lambda = 1); counts[1] is P(lambda = 0)
            # (index 1 only means "0" when abstentions are allowed).
            if marginal in easy_equals_one:
                easy_equals_one[marginal] = counts[-1]
            if marginal in easy_zeros:
                easy_zeros[marginal] = counts[1]
            if marginal in easy_abstention_probs:
                easy_abstention_probs[marginal] = counts[1]
        for moment in easy_moments:
            if len(moment) == 1:
                easy_moments[moment] = means[moment[0]]
            else:
                easy_moments[moment] = covariance[moment[0]][moment[1]]
        for factor in easy_equals_one:
            if easy_equals_one[factor] is None:
                easy_equals_one[factor] = np.sum(L_train[:,factor[0]] == 1) / L_train.shape[0]
        for condition in easy_zeros:
            if easy_zeros[condition] is None:
                idx = condition[0]
                easy_zeros[condition] = np.sum(L_train[:,idx] == 0) / L_train.shape[0]
        for factor in easy_abstention_probs:
            if easy_abstention_probs[factor] is None:
                idx = factor[0]
                easy_abstention_probs[factor] = np.sum(L_train[:,idx] == 0) / L_train.shape[0]

        # time for the remaining cases
        # Drop everything already answered by the easy path.
        lambda_marginals = {
            key: lambda_marginals[key]
            for key in lambda_marginals
            if key not in easy_marginals
        }
        lambda_moment_vals = {
            key: lambda_moment_vals[key]
            for key in lambda_moment_vals
            if key not in easy_moments
        }
        lambda_equals_one = {
            key: lambda_equals_one[key]
            for key in lambda_equals_one
            if key not in easy_equals_one
        }
        lambda_zeros = {
            key: lambda_zeros[key]
            for key in lambda_zeros
            if key not in easy_zeros
        }
        abstention_probabilities = {
            key: abstention_probabilities[key]
            for key in abstention_probabilities
            if key not in easy_abstention_probs
        }

        # for the rest, loop through L_train
        if (len(lambda_marginals) > 0 or len(lambda_moment_vals) > 0 or
                len(lambda_equals_one) > 0 or len(lambda_zeros) > 0 or
                len(abstention_probabilities) > 0):
            # figure out which lambda states we need to keep track of
            lambda_marginal_counts = {}
            lambda_marginal_vecs = {}
            lf_values = (-1, 0, 1) if self.allow_abstentions else (-1, 1)
            for lambda_marginal in lambda_marginals:
                nodes = [ 'lambda_{}'.format(idx) for idx in lambda_marginal ]
                vals = { lf: lf_values for lf in nodes }
                lf_vecs = sorted([
                    [ vec_dict[lf] for lf in nodes ]
                    for vec_dict in dict_product(vals)
                ])
                counts = {
                    tuple(lf_vec): 0
                    for lf_vec in lf_vecs
                }
                lambda_marginal_vecs[lambda_marginal] = lf_vecs
                lambda_marginal_counts[lambda_marginal] = counts
            # counts accumulate numerators; basis accumulate denominators
            # (for conditional moments the basis only grows on rows where the
            # conditioning LF's are all non-zero, i.e. none abstained).
            lambda_moment_counts = { moment: 0 for moment in lambda_moment_vals }
            lambda_moment_basis = { moment: 0 for moment in lambda_moment_vals }
            lambda_equals_one_counts = { factor: 0 for factor in lambda_equals_one }
            lambda_equals_one_basis = { factor: 0 for factor in lambda_equals_one }
            lambda_zero_counts = { condition: 0 for condition in lambda_zeros }
            abstention_probability_counts = { factor: 0 for factor in abstention_probabilities }

            for data_point in tqdm(L_train) if verbose else L_train:
                for marginal in lambda_marginals:
                    mask = [ data_point[idx] for idx in marginal ]
                    lambda_marginal_counts[marginal][tuple(mask)] += 1
                for moment in lambda_moment_vals:
                    # A tuple first element marks a conditional moment:
                    # (positive indices, conditioning indices).
                    if type(moment[0]) == type(()):
                        pos_mask = [ data_point[idx] for idx in moment[0] ]
                        zero_mask = [ data_point[idx] for idx in moment[1] ]
                        if np.count_nonzero(zero_mask) == 0:
                            lambda_moment_basis[moment] += 1
                            lambda_moment_counts[moment] += np.prod(pos_mask)
                    else:
                        mask = [ data_point[idx] for idx in moment ]
                        lambda_moment_counts[moment] += np.prod(mask)
                        lambda_moment_basis[moment] += 1
                for factor in lambda_equals_one:
                    if type(factor[0]) == type(()):
                        pos_mask = [ data_point[idx] for idx in factor[0] ]
                        zero_mask = [ data_point[idx] for idx in factor[1] ]
                        if np.count_nonzero(zero_mask) == 0:
                            lambda_equals_one_basis[factor] += 1
                            if np.prod(pos_mask) == 1:
                                lambda_equals_one_counts[factor] += 1
                    else:
                        mask = [ data_point[idx] for idx in factor ]
                        if np.prod(mask) == 1:
                            lambda_equals_one_counts[factor] += 1
                        lambda_equals_one_basis[factor] += 1
                for zero_condition in lambda_zeros:
                    zero_mask = [ data_point[idx] for idx in zero_condition ]
                    if np.count_nonzero(zero_mask) == 0:
                        lambda_zero_counts[zero_condition] += 1
                for factor in abstention_probability_counts:
                    zero_mask = [ data_point[idx] for idx in factor ]
                    # np.prod == 0 iff at least one LF in the factor abstained.
                    if np.prod(zero_mask) == 0:
                        abstention_probability_counts[factor] += 1

            # Normalize the raw counts into distributions / probabilities.
            lf_cardinality = 3 if self.allow_abstentions else 2
            for marginal in lambda_marginals:
                nodes = [ 'lambda_{}'.format(idx) for idx in marginal ]
                lf_vecs = lambda_marginal_vecs[marginal]
                counts = lambda_marginal_counts[marginal]

                lambda_marginals[marginal] = JointProbabilityDistribution(
                    nodes,
                    [ lf_cardinality for node in nodes ],
                    [
                        float(counts[tuple(lf_vec)]) / len(L_train)
                        for lf_vec in lf_vecs
                    ]
                )

            for moment in lambda_moment_vals:
                # Guard against an empty basis (no usable rows).
                if lambda_moment_basis[moment] == 0:
                    moment_val = 0
                else:
                    moment_val = lambda_moment_counts[moment] / lambda_moment_basis[moment]
                lambda_moment_vals[moment] = moment_val

            for factor in lambda_equals_one:
                if lambda_equals_one_basis[factor] == 0:
                    prob = 0
                else:
                    prob = lambda_equals_one_counts[factor] / lambda_equals_one_basis[factor]
                lambda_equals_one[factor] = prob

            for zero_condition in lambda_zeros:
                lambda_zeros[zero_condition] = lambda_zero_counts[zero_condition] / len(L_train)

            for factor in abstention_probabilities:
                abstention_probabilities[factor] = abstention_probability_counts[factor] / len(L_train)

        # update with the easy values
        lambda_marginals.update(easy_marginals)
        lambda_moment_vals.update(easy_moments)
        lambda_equals_one.update(easy_equals_one)
        lambda_zeros.update(easy_zeros)
        abstention_probabilities.update(easy_abstention_probs)

        return lambda_marginals, lambda_moment_vals, lambda_equals_one, lambda_zeros, abstention_probabilities

    # NOTE(review): the source chunk is truncated below, mid-docstring of fit().
    def fit(self, L_train, class_balance=None, Y_dev=None, flip_negative=True, clamp=True,
            solve_method='triplet_mean',
            sign_recovery='all_positive',
            verbose = False):
        '''Compute the marginal probabilities of each clique and separator set in the junction tree.

        L_train: an m x n matrix of LF outputs. L_train[k][i] is the value of \lambda_i on item k.
            1 means positive, -1
<reponame>sebtelko/pulumi-azure-native # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['NotificationHubArgs', 'NotificationHub'] @pulumi.input_type class NotificationHubArgs: def __init__(__self__, *, namespace_name: pulumi.Input[str], resource_group_name: pulumi.Input[str], adm_credential: Optional[pulumi.Input['AdmCredentialArgs']] = None, apns_credential: Optional[pulumi.Input['ApnsCredentialArgs']] = None, authorization_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessAuthorizationRulePropertiesArgs']]]] = None, baidu_credential: Optional[pulumi.Input['BaiduCredentialArgs']] = None, gcm_credential: Optional[pulumi.Input['GcmCredentialArgs']] = None, location: Optional[pulumi.Input[str]] = None, mpns_credential: Optional[pulumi.Input['MpnsCredentialArgs']] = None, name: Optional[pulumi.Input[str]] = None, notification_hub_name: Optional[pulumi.Input[str]] = None, registration_ttl: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input['SkuArgs']] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, wns_credential: Optional[pulumi.Input['WnsCredentialArgs']] = None): """ The set of arguments for constructing a NotificationHub resource. :param pulumi.Input[str] namespace_name: The namespace name. :param pulumi.Input[str] resource_group_name: The name of the resource group. 
:param pulumi.Input['AdmCredentialArgs'] adm_credential: The AdmCredential of the created NotificationHub :param pulumi.Input['ApnsCredentialArgs'] apns_credential: The ApnsCredential of the created NotificationHub :param pulumi.Input[Sequence[pulumi.Input['SharedAccessAuthorizationRulePropertiesArgs']]] authorization_rules: The AuthorizationRules of the created NotificationHub :param pulumi.Input['BaiduCredentialArgs'] baidu_credential: The BaiduCredential of the created NotificationHub :param pulumi.Input['GcmCredentialArgs'] gcm_credential: The GcmCredential of the created NotificationHub :param pulumi.Input[str] location: Resource location :param pulumi.Input['MpnsCredentialArgs'] mpns_credential: The MpnsCredential of the created NotificationHub :param pulumi.Input[str] name: The NotificationHub name. :param pulumi.Input[str] notification_hub_name: The notification hub name. :param pulumi.Input[str] registration_ttl: The RegistrationTtl of the created NotificationHub :param pulumi.Input['SkuArgs'] sku: The sku of the created namespace :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags :param pulumi.Input['WnsCredentialArgs'] wns_credential: The WnsCredential of the created NotificationHub """ pulumi.set(__self__, "namespace_name", namespace_name) pulumi.set(__self__, "resource_group_name", resource_group_name) if adm_credential is not None: pulumi.set(__self__, "adm_credential", adm_credential) if apns_credential is not None: pulumi.set(__self__, "apns_credential", apns_credential) if authorization_rules is not None: pulumi.set(__self__, "authorization_rules", authorization_rules) if baidu_credential is not None: pulumi.set(__self__, "baidu_credential", baidu_credential) if gcm_credential is not None: pulumi.set(__self__, "gcm_credential", gcm_credential) if location is not None: pulumi.set(__self__, "location", location) if mpns_credential is not None: pulumi.set(__self__, "mpns_credential", mpns_credential) if name is not None: 
pulumi.set(__self__, "name", name) if notification_hub_name is not None: pulumi.set(__self__, "notification_hub_name", notification_hub_name) if registration_ttl is not None: pulumi.set(__self__, "registration_ttl", registration_ttl) if sku is not None: pulumi.set(__self__, "sku", sku) if tags is not None: pulumi.set(__self__, "tags", tags) if wns_credential is not None: pulumi.set(__self__, "wns_credential", wns_credential) @property @pulumi.getter(name="namespaceName") def namespace_name(self) -> pulumi.Input[str]: """ The namespace name. """ return pulumi.get(self, "namespace_name") @namespace_name.setter def namespace_name(self, value: pulumi.Input[str]): pulumi.set(self, "namespace_name", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="admCredential") def adm_credential(self) -> Optional[pulumi.Input['AdmCredentialArgs']]: """ The AdmCredential of the created NotificationHub """ return pulumi.get(self, "adm_credential") @adm_credential.setter def adm_credential(self, value: Optional[pulumi.Input['AdmCredentialArgs']]): pulumi.set(self, "adm_credential", value) @property @pulumi.getter(name="apnsCredential") def apns_credential(self) -> Optional[pulumi.Input['ApnsCredentialArgs']]: """ The ApnsCredential of the created NotificationHub """ return pulumi.get(self, "apns_credential") @apns_credential.setter def apns_credential(self, value: Optional[pulumi.Input['ApnsCredentialArgs']]): pulumi.set(self, "apns_credential", value) @property @pulumi.getter(name="authorizationRules") def authorization_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessAuthorizationRulePropertiesArgs']]]]: """ The AuthorizationRules of the created 
NotificationHub """ return pulumi.get(self, "authorization_rules") @authorization_rules.setter def authorization_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessAuthorizationRulePropertiesArgs']]]]): pulumi.set(self, "authorization_rules", value) @property @pulumi.getter(name="baiduCredential") def baidu_credential(self) -> Optional[pulumi.Input['BaiduCredentialArgs']]: """ The BaiduCredential of the created NotificationHub """ return pulumi.get(self, "baidu_credential") @baidu_credential.setter def baidu_credential(self, value: Optional[pulumi.Input['BaiduCredentialArgs']]): pulumi.set(self, "baidu_credential", value) @property @pulumi.getter(name="gcmCredential") def gcm_credential(self) -> Optional[pulumi.Input['GcmCredentialArgs']]: """ The GcmCredential of the created NotificationHub """ return pulumi.get(self, "gcm_credential") @gcm_credential.setter def gcm_credential(self, value: Optional[pulumi.Input['GcmCredentialArgs']]): pulumi.set(self, "gcm_credential", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Resource location """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter(name="mpnsCredential") def mpns_credential(self) -> Optional[pulumi.Input['MpnsCredentialArgs']]: """ The MpnsCredential of the created NotificationHub """ return pulumi.get(self, "mpns_credential") @mpns_credential.setter def mpns_credential(self, value: Optional[pulumi.Input['MpnsCredentialArgs']]): pulumi.set(self, "mpns_credential", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The NotificationHub name. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="notificationHubName") def notification_hub_name(self) -> Optional[pulumi.Input[str]]: """ The notification hub name. """ return pulumi.get(self, "notification_hub_name") @notification_hub_name.setter def notification_hub_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "notification_hub_name", value) @property @pulumi.getter(name="registrationTtl") def registration_ttl(self) -> Optional[pulumi.Input[str]]: """ The RegistrationTtl of the created NotificationHub """ return pulumi.get(self, "registration_ttl") @registration_ttl.setter def registration_ttl(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "registration_ttl", value) @property @pulumi.getter def sku(self) -> Optional[pulumi.Input['SkuArgs']]: """ The sku of the created namespace """ return pulumi.get(self, "sku") @sku.setter def sku(self, value: Optional[pulumi.Input['SkuArgs']]): pulumi.set(self, "sku", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource tags """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="wnsCredential") def wns_credential(self) -> Optional[pulumi.Input['WnsCredentialArgs']]: """ The WnsCredential of the created NotificationHub """ return pulumi.get(self, "wns_credential") @wns_credential.setter def wns_credential(self, value: Optional[pulumi.Input['WnsCredentialArgs']]): pulumi.set(self, "wns_credential", value) class NotificationHub(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, adm_credential: Optional[pulumi.Input[pulumi.InputType['AdmCredentialArgs']]] = None, apns_credential: 
Optional[pulumi.Input[pulumi.InputType['ApnsCredentialArgs']]] = None, authorization_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SharedAccessAuthorizationRulePropertiesArgs']]]]] = None, baidu_credential: Optional[pulumi.Input[pulumi.InputType['BaiduCredentialArgs']]] = None, gcm_credential: Optional[pulumi.Input[pulumi.InputType['GcmCredentialArgs']]] = None, location: Optional[pulumi.Input[str]] = None, mpns_credential: Optional[pulumi.Input[pulumi.InputType['MpnsCredentialArgs']]] = None, name: Optional[pulumi.Input[str]] = None, namespace_name: Optional[pulumi.Input[str]] = None, notification_hub_name: Optional[pulumi.Input[str]] = None, registration_ttl: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, wns_credential: Optional[pulumi.Input[pulumi.InputType['WnsCredentialArgs']]] = None, __props__=None): """ Description of a NotificationHub Resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. 
:param pulumi.Input[pulumi.InputType['AdmCredentialArgs']] adm_credential: The AdmCredential of the created NotificationHub :param pulumi.Input[pulumi.InputType['ApnsCredentialArgs']] apns_credential: The ApnsCredential of the created NotificationHub :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SharedAccessAuthorizationRulePropertiesArgs']]]] authorization_rules: The AuthorizationRules of the created NotificationHub :param pulumi.Input[pulumi.InputType['BaiduCredentialArgs']] baidu_credential: The BaiduCredential of the created NotificationHub :param pulumi.Input[pulumi.InputType['GcmCredentialArgs']] gcm_credential: The GcmCredential of the created NotificationHub :param pulumi.Input[str] location: Resource location :param pulumi.Input[pulumi.InputType['MpnsCredentialArgs']] mpns_credential: The MpnsCredential of the created NotificationHub :param pulumi.Input[str] name: The NotificationHub name. :param pulumi.Input[str] namespace_name: The namespace name. :param pulumi.Input[str] notification_hub_name: The notification hub name. :param pulumi.Input[str] registration_ttl: The RegistrationTtl of the created NotificationHub :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the created namespace :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags :param pulumi.Input[pulumi.InputType['WnsCredentialArgs']] wns_credential: The WnsCredential of the created NotificationHub """ ... @overload def __init__(__self__, resource_name: str, args: NotificationHubArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Description of a NotificationHub Resource. :param str resource_name: The name of the resource. :param NotificationHubArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(NotificationHubArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, adm_credential: Optional[pulumi.Input[pulumi.InputType['AdmCredentialArgs']]] = None, apns_credential: Optional[pulumi.Input[pulumi.InputType['ApnsCredentialArgs']]] = None, authorization_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SharedAccessAuthorizationRulePropertiesArgs']]]]] = None, baidu_credential: Optional[pulumi.Input[pulumi.InputType['BaiduCredentialArgs']]] = None, gcm_credential: Optional[pulumi.Input[pulumi.InputType['GcmCredentialArgs']]] = None, location: Optional[pulumi.Input[str]] = None, mpns_credential: Optional[pulumi.Input[pulumi.InputType['MpnsCredentialArgs']]] = None, name: Optional[pulumi.Input[str]] = None, namespace_name: Optional[pulumi.Input[str]] = None, notification_hub_name: Optional[pulumi.Input[str]] = None, registration_ttl: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, wns_credential: Optional[pulumi.Input[pulumi.InputType['WnsCredentialArgs']]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = 
NotificationHubArgs.__new__(NotificationHubArgs) __props__.__dict__["adm_credential"] = adm_credential __props__.__dict__["apns_credential"] = apns_credential __props__.__dict__["authorization_rules"] = authorization_rules __props__.__dict__["baidu_credential"] = baidu_credential __props__.__dict__["gcm_credential"] = gcm_credential __props__.__dict__["location"] = location __props__.__dict__["mpns_credential"] = mpns_credential __props__.__dict__["name"] = name if namespace_name is None and not opts.urn: raise TypeError("Missing required property 'namespace_name'") __props__.__dict__["namespace_name"] = namespace_name __props__.__dict__["notification_hub_name"] = notification_hub_name __props__.__dict__["registration_ttl"] = registration_ttl if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["sku"] = sku __props__.__dict__["tags"] = tags __props__.__dict__["wns_credential"] = wns_credential __props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:notificationhubs/v20160301:NotificationHub"), pulumi.Alias(type_="azure-native:notificationhubs:NotificationHub"), pulumi.Alias(type_="azure-nextgen:notificationhubs:NotificationHub"), pulumi.Alias(type_="azure-native:notificationhubs/v20140901:NotificationHub"), pulumi.Alias(type_="azure-nextgen:notificationhubs/v20140901:NotificationHub"), pulumi.Alias(type_="azure-native:notificationhubs/v20170401:NotificationHub"), pulumi.Alias(type_="azure-nextgen:notificationhubs/v20170401:NotificationHub")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(NotificationHub, __self__).__init__( 'azure-native:notificationhubs/v20160301:NotificationHub', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'NotificationHub': """ Get 
an existing NotificationHub resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions
<reponame>thejacobhardman/Tic-Tac-Toe<filename>Tic-Tac-Toe.py # <NAME> # Intro To Programming # Professor <NAME> # 4/10/19 # Python Version 3.7.3 # Credit for punch sound effect to <NAME>: http://soundbible.com/995-Jab.html # Credit for cat scream sound effect to Ca9: http://soundbible.com/1509-Cat-Scream.html # All other sounds are in public domain # Importing pkgs import tkinter as tk import tkinter.font as tkFont from tkinter import messagebox import ctypes import sys import winsound as sound import random ########################################################## GLOBAL VARIABLES ############################################################## # Initializing the main window Window = tk.Tk() Window.title("Tic-Tac-Toe") # I wanted the buttons on the keypad to change color when you mouse over them so I found this code on Stack Overflow: # https://stackoverflow.com/questions/49888623/tkinter-hovering-over-button-color-change class HoverButton(tk.Button): def __init__(self, master, **kw): tk.Button.__init__(self,master=master,**kw) self.defaultBackground = self["background"] self.bind("<Enter>", self.on_enter) self.bind("<Leave>", self.on_leave) def on_enter(self, e): self['background'] = self['activebackground'] def on_leave(self, e): self['background'] = self.defaultBackground # Creating a custom font for the program to use Text_Font = tkFont.Font(family='Helvetica', size=40, weight='bold') # Background image to store in the Title Frame Title_Background = tk.PhotoImage(file="Tic-Tac-Toe-Background.png") Smaller_Background = Title_Background.subsample(2,2) # Displays whose turn it is Turn_Tracker = tk.StringVar() Turn_Tracker.set("X's Turn") # Tracks whether the the gameboard should display an 'X' or an 'O' Gameboard_Text = [tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar()] i = 0 for var in Gameboard_Text: var.set("") # Tracks whether the game is in singleplayer mode or in 
multiplayer mode Singleplayer = False # Tracks if the game is over Game_Over = False ########################################################## PROGRAM LOGIC ################################################################ ### Initiating the GUI Framework and the static GUI elements def Init_GUI(): global Gameboard global Reset # Content Frame that holds all of the sub widgets Content = tk.Frame(Window, height="1000", width="600") Content.pack(expand="true", fill="both") # Displays a banner image Title = tk.Frame(Content) Title.place(relwidth="1", relheight="0.25") Title_Image = tk.Label(Title, image=Smaller_Background) Title_Image.place(relwidth="1", relheight="1") # Displays whose turn it is as well alerts if there is a winner Display = tk.Frame(Content) Display.place(rely="0.25", relwidth="1", relheight="0.1") Display_Text = tk.Label(Display, font=Text_Font, textvariable=Turn_Tracker) Display_Text.place(relwidth="1", relheight="1") # The game board where the user will choose where to play Gameboard = tk.Frame(Content) Gameboard.place(rely="0.35", relwidth="1", relheight="0.55") # Provides the option for the user to reset the game (This will reset the Gameboard frame) Reset = tk.Frame(Content) Reset.place(rely="0.89", relwidth="1", relheight="0.11") ### Initiating the active GUI elements def Active_GUI(): North_West = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", font=Text_Font, textvariable=Gameboard_Text[0], command=lambda:[Update_Grid(1), Check_Win()]) North_West.place(relwidth="0.333", relheight="0.33") North = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", font=Text_Font, textvariable=Gameboard_Text[1], command=lambda:[Update_Grid(2), Check_Win()]) North.place(relx="0.333", relwidth="0.333", relheight="0.33") North_East = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", 
font=Text_Font, textvariable=Gameboard_Text[2], command=lambda:[Update_Grid(3), Check_Win()]) North_East.place(relx="0.6663", relwidth="0.3335", relheight="0.33") West = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", font=Text_Font, textvariable=Gameboard_Text[3], command=lambda:[Update_Grid(4), Check_Win()]) West.place(rely="0.33", relwidth="0.333", relheight="0.33") Center = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", font=Text_Font, textvariable=Gameboard_Text[4], command=lambda:[Update_Grid(5), Check_Win()]) Center.place(relx="0.333", rely="0.33", relwidth="0.333", relheight="0.33") East = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", font=Text_Font, textvariable=Gameboard_Text[5], command=lambda:[Update_Grid(6), Check_Win()]) East.place(relx="0.6663", rely="0.33", relwidth="0.3335", relheight="0.33") South_West = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", font=Text_Font, textvariable=Gameboard_Text[6], command=lambda:[Update_Grid(7), Check_Win()]) South_West.place(rely="0.66", relwidth="0.333", relheight="0.33") South = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", font=Text_Font, textvariable=Gameboard_Text[7], command=lambda:[Update_Grid(8), Check_Win()]) South.place(relx="0.333", rely="0.66", relwidth="0.333", relheight="0.33") South_East = HoverButton(Gameboard, bg="dark red", bd="2", fg="black", activebackground="red", activeforeground="black", font=Text_Font, textvariable=Gameboard_Text[8], command=lambda:[Update_Grid(9), Check_Win()]) South_East.place(relx="0.6663", rely="0.66", relwidth="0.3335", relheight="0.33") Reset_Button = HoverButton(Reset, bg="black", font=Text_Font, text="Reset Game", fg="white", activebackground="red", 
activeforeground="white", command=lambda:Reset_Game()) Reset_Button.place(relwidth="1", relheight="1") ### Updates the display to show whose turn it is. def Update_Display(): if Turn_Tracker.get() == "X's Turn": Turn_Tracker.set("O's Turn") elif Turn_Tracker.get() == "O's Turn": Turn_Tracker.set("X's Turn") ### Updates the gameboard to display the player's moves. def Update_Grid(arg): global Singleplayer # The player selected the North-West button (Internal logic is the same for all buttons) if arg == 1: if Gameboard_Text[0].get() == "": # Checking if someone has already played on that space if Turn_Tracker.get() == "X's Turn": # It's the first player's turn sound.PlaySound("Jab.wav", 1) # Sound effects for fun Gameboard_Text[0].set("X") # Making the move Update_Display() # Updates the turn tracker else: # It's the second player's turn sound.PlaySound("Jab.wav", 1) # Sound effects for fun Gameboard_Text[0].set("O") # Making the move Update_Display() # Updates the turn tracker else: # Executes if someone has already played on the selected space if Singleplayer == True: # Checks if the user has enabled the AI if Turn_Tracker.get() == "O's Turn": # Checks if the AI made the bad move AI_Turn() # Has the computer guess again else: # The player made the bad guess sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") else: # The player has not enabled the AI sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") # The player selected the North button elif arg == 2: if Gameboard_Text[1].get() == "": if Turn_Tracker.get() == "X's Turn": sound.PlaySound("Jab.wav", 1) Gameboard_Text[1].set("X") Update_Display() else: sound.PlaySound("Jab.wav", 1) Gameboard_Text[1].set("O") Update_Display() else: # Executes if someone has already played on the selected space if Singleplayer == 
True: if Turn_Tracker.get() == "O's Turn": AI_Turn() else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") # The player selected the North-East button elif arg == 3: if Gameboard_Text[2].get() == "": if Turn_Tracker.get() == "X's Turn": sound.PlaySound("Jab.wav", 1) Gameboard_Text[2].set("X") Update_Display() else: sound.PlaySound("Jab.wav", 1) Gameboard_Text[2].set("O") Update_Display() else: # Executes if someone has already played on the selected space if Singleplayer == True: if Turn_Tracker.get() == "O's Turn": AI_Turn() else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") # The player selected the West button elif arg == 4: if Gameboard_Text[3].get() == "": if Turn_Tracker.get() == "X's Turn": sound.PlaySound("Jab.wav", 1) Gameboard_Text[3].set("X") Update_Display() else: sound.PlaySound("Jab.wav", 1) Gameboard_Text[3].set("O") Update_Display() else: # Executes if someone has already played on the selected space if Singleplayer == True: if Turn_Tracker.get() == "O's Turn": AI_Turn() else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") # The player selected the Center button elif arg == 5: if Gameboard_Text[4].get() == "": if Turn_Tracker.get() == "X's Turn": 
sound.PlaySound("Jab.wav", 1) Gameboard_Text[4].set("X") Update_Display() else: sound.PlaySound("Jab.wav", 1) Gameboard_Text[4].set("O") Update_Display() else: # Executes if someone has already played on the selected space if Singleplayer == True: if Turn_Tracker.get() == "O's Turn": AI_Turn() else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") # The player selected the East button elif arg == 6: if Gameboard_Text[5].get() == "": if Turn_Tracker.get() == "X's Turn": sound.PlaySound("Jab.wav", 1) Gameboard_Text[5].set("X") Update_Display() else: sound.PlaySound("Jab.wav", 1) Gameboard_Text[5].set("O") Update_Display() else: # Executes if someone has already played on the selected space if Singleplayer == True: if Turn_Tracker.get() == "O's Turn": AI_Turn() else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") # The player selected the South-West button elif arg == 7: if Gameboard_Text[6].get() == "": if Turn_Tracker.get() == "X's Turn": sound.PlaySound("Jab.wav", 1) Gameboard_Text[6].set("X") Update_Display() else: sound.PlaySound("Jab.wav", 1) Gameboard_Text[6].set("O") Update_Display() else: # Executes if someone has already played on the selected space if Singleplayer == True: if Turn_Tracker.get() == "O's Turn": AI_Turn() else: sound.PlaySound("A-Tone.wav", 1) # Error sound messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") else: sound.PlaySound("A-Tone.wav", 1) # Error sound 
messagebox.showerror(title="ERROR", message="You cannot play on a space that has already been played on.") # The player selected the South button elif arg == 8: if Gameboard_Text[7].get() == "": if Turn_Tracker.get() == "X's Turn": sound.PlaySound("Jab.wav", 1) Gameboard_Text[7].set("X") Update_Display() else: sound.PlaySound("Jab.wav", 1) Gameboard_Text[7].set("O") Update_Display() else: # Executes if someone has already played on the selected space if Singleplayer == True: if
returned results to models registered in the search (default = True) Returns: A dictionary with the following keys: `results` -- A list of `SearchResult` `hits` -- The total available results Opens a database connection, then builds a simple query using the `model_instance` to build the unique identifier. For each document retrieved(should always be one), adds an entry into an RSet (relevance set) with the document id, then, uses the RSet to query for an ESet (A set of terms that can be used to suggest expansions to the original query), omitting any document that was in the original query. Finally, processes the resulting matches and returns. """ database = self._database() if result_class is None: result_class = SearchResult query = xapian.Query(TERM_PREFIXES['id'] + get_identifier(model_instance)) enquire = xapian.Enquire(database) enquire.set_query(query) rset = xapian.RSet() if not end_offset: end_offset = database.get_doccount() match = None for match in self._get_enquire_mset(database, enquire, 0, end_offset): rset.add_document(match.docid) if match is None: if not self.silently_fail: raise InvalidIndexError('Instance %s with id "%d" not indexed' % (get_identifier(model_instance), model_instance.id)) else: return {'results': [], 'hits': 0} query = xapian.Query( xapian.Query.OP_ELITE_SET, [expand.term for expand in enquire.get_eset(match.document.termlist_count(), rset, XHExpandDecider())], match.document.termlist_count() ) query = xapian.Query( xapian.Query.OP_AND_NOT, [query, TERM_PREFIXES['id'] + get_identifier(model_instance)] ) if limit_to_registered_models: query = self._build_models_query(query) if additional_query: query = xapian.Query( xapian.Query.OP_AND, query, additional_query ) enquire.set_query(query) results = [] matches = self._get_enquire_mset(database, enquire, start_offset, end_offset) for match in matches: app_label, model_name, pk, model_data = pickle.loads(self._get_document_data(database, match.document)) results.append( 
result_class(app_label, model_name, pk, match.percent, **model_data) ) return { 'results': results, 'hits': self._get_hit_count(database, enquire), 'facets': { 'fields': {}, 'dates': {}, 'queries': {}, }, 'spelling_suggestion': None, } def parse_query(self, query_string): """ Given a `query_string`, will attempt to return a xapian.Query Required arguments: ``query_string`` -- A query string to parse Returns a xapian.Query """ if query_string == '*': return xapian.Query('') # Match everything elif query_string == '': return xapian.Query() # Match nothing qp = xapian.QueryParser() qp.set_database(self._database()) qp.set_stemmer(xapian.Stem(self.language)) qp.set_stemming_strategy(self.stemming_strategy) qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR]) qp.add_boolean_prefix('django_ct', TERM_PREFIXES['django_ct']) for field_dict in self.schema: # since 'django_ct' has a boolean_prefix, # we ignore it here. if field_dict['field_name'] == 'django_ct': continue qp.add_prefix( field_dict['field_name'], TERM_PREFIXES['field'] + field_dict['field_name'].upper() ) vrp = XHValueRangeProcessor(self) qp.add_valuerangeprocessor(vrp) return qp.parse_query(query_string, self.flags) def build_schema(self, fields): """ Build the schema from fields. 
:param fields: A list of fields in the index :returns: list of dictionaries Each dictionary has the keys field_name: The name of the field index type: what type of value it is 'multi_valued': if it allows more than one value 'column': a number identifying it 'type': the type of the field 'multi_valued': 'false', 'column': 0} """ content_field_name = '' schema_fields = [ {'field_name': ID, 'type': 'text', 'multi_valued': 'false', 'column': 0}, {'field_name': DJANGO_ID, 'type': 'integer', 'multi_valued': 'false', 'column': 1}, {'field_name': DJANGO_CT, 'type': 'text', 'multi_valued': 'false', 'column': 2}, ] self._columns[ID] = 0 self._columns[DJANGO_ID] = 1 self._columns[DJANGO_CT] = 2 column = len(schema_fields) for field_name, field_class in sorted(list(fields.items()), key=lambda n: n[0]): if field_class.document is True: content_field_name = field_class.index_fieldname if field_class.indexed is True: field_data = { 'field_name': field_class.index_fieldname, 'type': 'text', 'multi_valued': 'false', 'column': column, } if field_class.field_type == 'date': field_data['type'] = 'date' elif field_class.field_type == 'datetime': field_data['type'] = 'datetime' elif field_class.field_type == 'integer': field_data['type'] = 'integer' elif field_class.field_type == 'float': field_data['type'] = 'float' elif field_class.field_type == 'boolean': field_data['type'] = 'boolean' elif field_class.field_type == 'ngram': field_data['type'] = 'ngram' elif field_class.field_type == 'edge_ngram': field_data['type'] = 'edge_ngram' if field_class.is_multivalued: field_data['multi_valued'] = 'true' schema_fields.append(field_data) self._columns[field_data['field_name']] = column column += 1 return content_field_name, schema_fields @staticmethod def _do_highlight(content, query, tag='em'): """ Highlight `query` terms in `content` with html `tag`. This method assumes that the input text (`content`) does not contain any special formatting. 
That is, it does not contain any html tags or similar markup that could be screwed up by the highlighting. Required arguments: `content` -- Content to search for instances of `text` `text` -- The text to be highlighted """ for term in query: term = term.decode('utf-8') for match in re.findall('[^A-Z]+', term): # Ignore field identifiers match_re = re.compile(match, re.I) content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content) return content def _prepare_facet_field_spies(self, facets): """ Returns a list of spies based on the facets used to count frequencies. """ spies = [] for facet in facets: slot = self.column[facet] spy = xapian.ValueCountMatchSpy(slot) # add attribute "slot" to know which column this spy is targeting. spy.slot = slot spies.append(spy) return spies def _process_facet_field_spies(self, spies): """ Returns a dict of facet names with lists of tuples of the form (term, term_frequency) from a list of spies that observed the enquire. """ facet_dict = {} for spy in spies: field = self.schema[spy.slot] field_name, field_type = field['field_name'], field['type'] facet_dict[field_name] = [] for facet in list(spy.values()): if field_type == 'float': # the float term is a Xapian serialized object, which is # in bytes. term = facet.term else: term = facet.term.decode('utf-8') facet_dict[field_name].append((_from_xapian_value(term, field_type), facet.termfreq)) return facet_dict def _do_multivalued_field_facets(self, results, field_facets): """ Implements a multivalued field facet on the results. 
This is implemented using brute force - O(N^2) - because Xapian does not have it implemented yet (see http://trac.xapian.org/ticket/199) """ facet_dict = {} for field in field_facets: facet_list = {} if not self._multi_value_field(field): continue for result in results: field_value = getattr(result, field) for item in field_value: # Facet each item in a MultiValueField facet_list[item] = facet_list.get(item, 0) + 1 facet_dict[field] = list(facet_list.items()) return facet_dict @staticmethod def _do_date_facets(results, date_facets): """ Private method that facets a document by date ranges Required arguments: `results` -- A list SearchResults to facet `date_facets` -- A dictionary containing facet parameters: {'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}} nb., gap must be one of the following: year|month|day|hour|minute|second For each date facet field in `date_facets`, generates a list of date ranges (from `start_date` to `end_date` by `gap_by`) then iterates through `results` and tallies the count for each date_facet. Returns a dictionary of date facets (fields) containing a list with entries for each range and a count of documents matching the range. eg. 
{ 'pub_date': [ (datetime.datetime(2009, 1, 1, 0, 0), 5), (datetime.datetime(2009, 2, 1, 0, 0), 0), (datetime.datetime(2009, 3, 1, 0, 0), 0), (datetime.datetime(2008, 4, 1, 0, 0), 1), (datetime.datetime(2008, 5, 1, 0, 0), 2), ], } """ def next_datetime(previous, gap_value, gap_type): year = previous.year month = previous.month if gap_type == 'year': next = previous.replace(year=year + gap_value) elif gap_type == 'month': if month + gap_value <= 12: next = previous.replace(month=month + gap_value) else: next = previous.replace( month=((month + gap_value) % 12), year=(year + (month + gap_value) // 12) ) elif gap_type == 'day': next = previous + datetime.timedelta(days=gap_value) elif gap_type == 'hour': return previous + datetime.timedelta(hours=gap_value) elif gap_type == 'minute': next = previous + datetime.timedelta(minutes=gap_value) elif gap_type == 'second': next = previous + datetime.timedelta(seconds=gap_value) else: raise TypeError('\'gap_by\' must be ' '{second, minute, day, month, year}') return next facet_dict = {} for date_facet, facet_params in list(date_facets.items()): gap_type = facet_params.get('gap_by') gap_value = facet_params.get('gap_amount', 1) date_range = facet_params['start_date'] # construct the bins of the histogram facet_list = [] while date_range < facet_params['end_date']: facet_list.append((date_range, 0)) date_range = next_datetime(date_range, gap_value, gap_type) facet_list = sorted(facet_list, key=lambda x: x[0], reverse=True) for result in results: result_date = getattr(result, date_facet) # convert date to datetime if not isinstance(result_date, datetime.datetime): result_date = datetime.datetime(result_date.year, result_date.month, result_date.day) # ignore results outside the boundaries. if facet_list[0][0] < result_date < facet_list[-1][0]: continue # populate the histogram by putting the result on the right bin. 
for n, facet_date in enumerate(facet_list): if result_date > facet_date[0]: # equal to facet_list[n][1] += 1, but for a tuple facet_list[n] = (facet_list[n][0], (facet_list[n][1] + 1)) break # bin found; go to next result facet_dict[date_facet] = facet_list return facet_dict def _do_query_facets(self, results, query_facets): """ Private method that facets a document by query Required arguments: `results` -- A list SearchResults to facet `query_facets` -- A dictionary containing facet parameters: {'field': 'query', [...]} For each query in `query_facets`, generates a dictionary entry with the field name as the key and a tuple with the query and result count as the value. eg. {'name': ('a*', 5)} """ facet_dict
<reponame>AILab-FOI/APi<gh_stars>0 #!/usr/bin/env python3 from baseagent import * import json import argparse class APiChannel( APiBaseAgent ): '''Channel agent.''' REPL_STR = '"$$$API_THIS_IS_VARIABLE_%s$$$"' def __init__( self, channelname, name, password, holon, token, portrange, channel_input=None, channel_output=None, transformer=None ): self.channelname = channelname self.holon = holon super().__init__( name, password, token ) self.kb = swipl() self.var_re = re.compile( r'[\?][a-zA-Z][a-zA-Z0-9-_]*' ) self.sender_agents = [] self.receiver_agents = [] self.min_port, self.max_port = portrange self.attach_servers = [] self.subscribe_servers = [] self.agree_message_template = {} self.agree_message_template[ 'performative' ] = 'agree' self.agree_message_template[ 'ontology' ] = 'APiDataTransfer' self.agree_message_template[ 'auth-token' ] = self.auth self.refuse_message_template = {} self.refuse_message_template[ 'performative' ] = 'refuse' self.refuse_message_template[ 'ontology' ] = 'APiDataTransfer' self.refuse_message_template[ 'auth-token' ] = self.auth self.input = channel_input self.output = channel_output self.transformer = transformer # TODO: return map function based on channel_input/output # descriptor (can be JSON, XML, REGEX, TRANSFORMER, TRANSPARENT) # * JSON -> JSON input or output # XML -> XML input or output # * REGEX -> Python style regex (with named groups) input # TRANSFORMER -> read definition from channel description (.cd) file # * TRANSPARENT -> no mapping needed, just forward # # * -> Done! if not self.input or not self.output: if self.transformer: self.map = self.map_transformer else: # TRANSPARENT channel (default) self.map = lambda x: x else: if self.transformer: err = "Both input/output combination and transformer defined. I don't know which mapping to use." 
raise APiChannelDefinitionError( err ) elif self.input.startswith( 'regex( ' ): reg = self.input[ 7:-2 ] print( 'RE', reg ) self.input_re = re.compile( reg ) self.map = self.map_re elif self.input.startswith( 'json( ' ): self.input_json = self.input[ 6:-2 ] self.kb.query( 'use_module(library(http/json))' ) cp = self.input_json replaces = {} for var in self.var_re.findall( self.input_json ): rpl = self.REPL_STR % var replaces[ rpl[ 1:-1 ] ] = var cp = cp.replace( var, rpl ) query = " APIRES = ok, open_string( '%s', S ), json_read_dict( S, X ). " % cp res = self.kb.query( query ) prolog_json = res[ 0 ][ 'X' ] for k, v in replaces.items(): prolog_json = prolog_json.replace( k, 'X' + v[ 1: ] ) self.input_json = prolog_json self.map = self.map_json elif self.input.startswith( 'xml( ' ): # TODO: Implement XML raise NotImplementedError( NIE ) def map( self, data ): pass def map_re( self, data ): print( 'MAPRE DATA', data ) match = self.input_re.match( data ) print( 'MAPRE MATCH', match ) vars = self.input_re.groupindex.keys() print( 'MAPRE MATCH', vars ) results = {} if not match: return '' for i in vars: results[ i ] = match.group( i ) query = '' for var, val in results.items(): query += 'X' + var + " = '" + val + "', " query = 'APIRES = ok, ' + query[ :-2 ] res = self.kb.query( query ) return self.format_output( res ) def format_output( self, res ): output = self.output for var, val in res[ 0 ].items(): output = output.replace( '?' + var[ 1: ], val ) return output def map_transformer( self, data ): # TODO: Implement transformer raise NotImplementedError( NIE ) def map_json( self, data ): query = " APIRES = ok, open_string( '%s', S ), json_read_dict( S, X ). " % data res = self.kb.query( query ) prolog_json = res[ 0 ][ 'X' ] query = " APIRES = ok, X = %s, Y = %s, X = Y. 
" % ( prolog_json, self.input_json ) res = self.kb.query( query ) del res[ 0 ][ 'X' ] del res[ 0 ][ 'Y' ] return self.format_output( res ) def map_xml( self, data ): # TODO: Implement XML raise NotImplementedError( NIE ) def get_free_port( self ): '''Get a free port on the host''' sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM ) port = self.min_port while port <= self.max_port: try: sock.bind( ( '', port ) ) sock.close() return port except OSError: port += 1 raise IOError( 'No free ports in range %d - %d' % ( self.min_port, self.max_port ) ) def get_ip( self ): '''Get the current IP address of the agent''' # TODO: Verify this works with outside network # addresses! s = socket.socket( socket.AF_INET, socket.SOCK_DGRAM ) try: # doesn't even have to be reachable s.connect( ( '10.255.255.255', 1 ) ) IP = s.getsockname()[ 0 ] except Exception: IP = '127.0.0.1' finally: s.close() return IP def create_server( self, port, protocol ): if protocol == 'udp': return nclib.UDPServer( ( '0.0.0.0', port ) ) return nclib.TCPServer( ( '0.0.0.0', port ) ) def get_server( self, srv_type, protocol ): '''Get a NetCat server for sending or receiving''' port = self.get_free_port() host = self.get_ip() self.say( host, port ) srv_created = False while not srv_created: try: srv = self.create_server( port, protocol ) srv_created = True print( f'{protocol} SERVER CONNECTED AT PORT', port ) except OSError as e: port = self.get_free_port() if srv_type == 'attach': self.attach_servers.append( srv ) print( 'ATTACH SERVERS:', self.attach_servers ) elif srv_type == 'subscribe': self.subscribe_servers.append( srv ) print( 'SUBSCRIBE SERVERS:', self.subscribe_servers ) else: raise APiChannelDefinitionError( 'Unknown server type:', srv_type ) return host, str( port ), protocol class Subscribe( CyclicBehaviour ): '''Agent wants to listen or write to channel''' async def run( self ): msg = await self.receive( timeout=0.1 ) if msg: if self.agent.verify( msg ): self.agent.say( '(Subscribe) 
Message verified, processing ...' ) self.agent.receiver_agents.append( str( msg.sender ) ) metadata = deepcopy( self.agent.agree_message_template ) metadata[ 'in-reply-to' ] = msg.metadata[ 'reply-with' ] metadata[ 'agent' ] = self.agent.channelname req_protocol = msg.metadata[ 'protocol' ] if msg.metadata[ 'performative' ] == 'subscribe': metadata[ 'type' ] = 'input' server, port, protocol = self.agent.get_server( 'subscribe', req_protocol ) print( 'ADDED subscribe server', server, port ) elif msg.metadata[ 'performative' ] == 'request': metadata[ 'type' ] = 'output' server, port, protocol = self.agent.get_server( 'attach', req_protocol ) print( 'ADDED attach server', server, port ) else: self.agent.say( 'Unknown message' ) metadata = self.agent.refuse_message_template metadata[ 'in-reply-to' ] = msg.metadata[ 'reply-with' ] metadata[ 'reason' ] = 'unknown-message' await self.agent.schedule_message( str( msg.sender ), metadata=metadata ) metadata[ 'server' ] = server metadata[ 'port' ] = port metadata[ 'protocol' ] = protocol await self.agent.schedule_message( str( msg.sender ), metadata=metadata ) await asyncio.sleep( 0.1 ) else: self.agent.say( 'Message could not be verified. IMPOSTER!!!!!!' 
) metadata = self.agent.refuse_message_template metadata[ 'in-reply-to' ] = msg.metadata[ 'reply-with' ] metadata[ 'reason' ] = 'security-policy' await self.agent.schedule_message( str( msg.sender ), metadata=metadata ) class Forward( CyclicBehaviour ): '''Receive inputs, map them to outputs and send to subscribers''' # TODO: Test this behaviour async def run( self ): def iter_clients( srv ): try: c, a = srv.sock.accept() client = nclib.Netcat( sock=c, server=a ) yield client for client in srv: yield client except Exception as e: return if self.agent.attach_servers: for srv in self.agent.attach_servers: srv.sock.settimeout( 0.1 ) for client in iter_clients( srv ): self.agent.say( 'CLIENT', client, srv.addr ) result = client.recv_until( self.agent.delimiter, timeout=0.1 ) self.agent.say( 'RESULT', result, srv.addr ) if result: self.agent.say( 'MAPPING RESULT', result.decode(), srv.addr ) msg = self.agent.map( result.decode() ) self.agent.say( 'MSG', msg, srv.addr ) self.agent.say( 'SERVER LIST 1', self.agent.subscribe_servers ) if self.agent.subscribe_servers: self.agent.say( 'SERVER LIST 2', self.agent.subscribe_servers ) for srv_out in self.agent.subscribe_servers: self.agent.say( 'OUT SERVER', srv_out, srv_out.addr ) for client_out in srv_out: self.agent.say( 'SENDING MSG TO', client_out, client_out.peer ) client_out.sendline( msg.encode() ) self.agent.say( 'DONE SENDING MSG' ) async def setup(self): super().setup() bsubs = self.Subscribe() bsubs_template = Template( metadata={ "ontology": "APiDataTransfer" } # "performative": "subscribe", ) self.add_behaviour( bsubs, bsubs_template ) bfwd = self.Forward() self.add_behaviour( bfwd ) def main( name, address, password, holon, token, portrange, input, output, transformer ): portrange = json.loads( portrange ) input = json.loads( input ) output = json.loads( output ) transformer = json.loads( transformer ) a = APiChannel( name, address, password, holon, token, portrange, channel_input=input, channel_output=output, 
transformer=transformer ) a.start() if __name__ == '__main__': parser = argparse.ArgumentParser( description='APi agent.') parser.add_argument( 'name', metavar='NAME', type=str, help="Channel's local APi name" ) parser.add_argument( 'address', metavar='ADDRESS', type=str, help="Channel's XMPP/JID address" ) parser.add_argument( 'password', metavar='PWD', type=str, help="Channel's XMPP/JID password" ) parser.add_argument( 'holon', metavar='HOLON', type=str, help="Channel's instantiating holon's XMPP/JID address" ) parser.add_argument( 'token', metavar='TOKEN', type=str,
"-".join(fname.split("-")[:-3]) if fname.endswith(".tar"): fname, _ = os.path.splitext(fname) # Substring out package name (plus dash) from file name to get version. version = fname[len(name) + 1 :] # Ignore implicit post releases in version number. if "-" in version and version.split("-")[1].isdigit(): version = version.split("-")[0] return version def get_downloads_info(names_map, section): from .vendor.requirementslib.models.requirements import Requirement info = [] p = project.parsed_pipfile for fname in os.listdir(project.download_location): # Get name from filename mapping. name = Requirement.from_line(names_map[fname]).name # Get the version info from the filenames. version = parse_download_fname(fname, name) # Get the hash of each file. cmd = '{0} hash "{1}"'.format( escape_grouped_arguments(which_pip()), os.sep.join([project.download_location, fname]), ) c = delegator.run(cmd) hash = c.out.split("--hash=")[1].strip() # Verify we're adding the correct version from Pipfile # and not one from a dependency. specified_version = p[section].get(name, "") if is_required_version(version, specified_version): info.append(dict(name=name, version=version, hash=hash)) return info def do_lock( system=False, clear=False, pre=False, keep_outdated=False, write=True, pypi_mirror=None, ): """Executes the freeze functionality.""" from .utils import get_vcs_deps cached_lockfile = {} if not pre: pre = project.settings.get("allow_prereleases") if keep_outdated: if not project.lockfile_exists: click.echo( "{0}: Pipfile.lock must exist to use --keep-outdated!".format( crayons.red("Warning", bold=True) ) ) sys.exit(1) cached_lockfile = project.lockfile_content # Create the lockfile. lockfile = project._lockfile # Cleanup lockfile. for section in ("default", "develop"): for k, v in lockfile[section].copy().items(): if not hasattr(v, "keys"): del lockfile[section][k] # Ensure that develop inherits from default. 
dev_packages = project.dev_packages.copy() for dev_package in project.dev_packages: if dev_package in project.packages: dev_packages[dev_package] = project.packages[dev_package] # Resolve dev-package dependencies, with pip-tools. pip_freeze = delegator.run( "{0} freeze".format(escape_grouped_arguments(which_pip(allow_global=system))) ).out sections = { "dev": { "packages": project.dev_packages, "vcs": project.vcs_dev_packages, "pipfile_key": "dev_packages", "lockfile_key": "develop", "log_string": "dev-packages", "dev": True, }, "default": { "packages": project.packages, "vcs": project.vcs_packages, "pipfile_key": "packages", "lockfile_key": "default", "log_string": "packages", "dev": False, }, } for section_name in ["dev", "default"]: settings = sections[section_name] if write: # Alert the user of progress. click.echo( u"{0} {1} {2}".format( crayons.normal("Locking"), crayons.red("[{0}]".format(settings["log_string"])), crayons.normal("dependencies…"), ), err=True, ) deps = convert_deps_to_pip( settings["packages"], project, r=False, include_index=True ) results = venv_resolve_deps( deps, which=which, project=project, clear=clear, pre=pre, allow_global=system, pypi_mirror=pypi_mirror, ) # Add dependencies to lockfile. for dep in results: is_top_level = dep["name"] in settings["packages"] pipfile_entry = settings["packages"][dep["name"]] if is_top_level else None dep_lockfile = clean_resolved_dep( dep, is_top_level=is_top_level, pipfile_entry=pipfile_entry ) lockfile[settings["lockfile_key"]].update(dep_lockfile) # Add refs for VCS installs. # TODO: be smarter about this. 
vcs_reqs, vcs_lockfile = get_vcs_deps( project, pip_freeze, which=which, clear=clear, pre=pre, allow_global=system, dev=settings["dev"], ) vcs_lines = [req.as_line() for req in vcs_reqs if req.editable] vcs_results = venv_resolve_deps( vcs_lines, which=which, project=project, clear=clear, pre=pre, allow_global=system, pypi_mirror=pypi_mirror, ) for dep in vcs_results: normalized = pep423_name(dep["name"]) if not hasattr(dep, "keys") or not hasattr(dep["name"], "keys"): continue is_top_level = dep["name"] in vcs_lockfile or normalized in vcs_lockfile if is_top_level: try: pipfile_entry = vcs_lockfile[dep["name"]] except KeyError: pipfile_entry = vcs_lockfile[normalized] else: pipfile_entry = None dep_lockfile = clean_resolved_dep( dep, is_top_level=is_top_level, pipfile_entry=pipfile_entry ) vcs_lockfile.update(dep_lockfile) lockfile[settings["lockfile_key"]].update(vcs_lockfile) # Support for --keep-outdated… if keep_outdated: for section_name, section in ( ("default", project.packages), ("develop", project.dev_packages), ): for package_specified in section: norm_name = pep423_name(package_specified) if not is_pinned(section[package_specified]): if norm_name in cached_lockfile[section_name]: lockfile[section_name][norm_name] = cached_lockfile[ section_name ][norm_name] # Overwrite any develop packages with default packages. 
for default_package in lockfile["default"]: if default_package in lockfile["develop"]: lockfile["develop"][default_package] = lockfile["default"][default_package] if write: project.write_lockfile(lockfile) click.echo( "{0}".format( crayons.normal( "Updated Pipfile.lock ({0})!".format( lockfile["_meta"].get("hash", {}).get("sha256")[-6:] ), bold=True, ) ), err=True, ) else: return lockfile def do_purge(bare=False, downloads=False, allow_global=False): """Executes the purge functionality.""" from .vendor.requirementslib.models.requirements import Requirement if downloads: if not bare: click.echo(crayons.normal(u"Clearing out downloads directory…", bold=True)) shutil.rmtree(project.download_location) return freeze = delegator.run( "{0} freeze".format( escape_grouped_arguments(which_pip(allow_global=allow_global)) ) ).out # Remove comments from the output, if any. installed = [ line for line in freeze.splitlines() if not line.lstrip().startswith("#") ] # Remove setuptools and friends from installed, if present. 
for package_name in BAD_PACKAGES: for i, package in enumerate(installed): if package.startswith(package_name): del installed[i] actually_installed = [] for package in installed: try: dep = Requirement.from_line(package) except AssertionError: dep = None if dep and not dep.is_vcs and not dep.editable: dep = dep.name actually_installed.append(dep) if not bare: click.echo( u"Found {0} installed package(s), purging…".format(len(actually_installed)) ) command = "{0} uninstall {1} -y".format( escape_grouped_arguments(which_pip(allow_global=allow_global)), " ".join(actually_installed), ) if environments.is_verbose(): click.echo("$ {0}".format(command)) c = delegator.run(command) if not bare: click.echo(crayons.blue(c.out)) click.echo(crayons.green("Environment now purged and fresh!")) def do_init( dev=False, requirements=False, allow_global=False, ignore_pipfile=False, skip_lock=False, system=False, concurrent=True, deploy=False, pre=False, keep_outdated=False, requirements_dir=None, pypi_mirror=None, ): """Executes the init functionality.""" from .environments import PIPENV_VIRTUALENV cleanup_reqdir = False if not system: if not project.virtualenv_exists: try: do_create_virtualenv(pypi_mirror=pypi_mirror) except KeyboardInterrupt: cleanup_virtualenv(bare=False) sys.exit(1) # Ensure the Pipfile exists. if not deploy: ensure_pipfile(system=system) if not requirements_dir: cleanup_reqdir = True requirements_dir = vistir.compat.TemporaryDirectory( suffix="-requirements", prefix="pipenv-" ) # Write out the lockfile if it doesn't exist, but not if the Pipfile is being ignored if (project.lockfile_exists and not ignore_pipfile) and not skip_lock: old_hash = project.get_lockfile_hash() new_hash = project.calculate_pipfile_hash() if new_hash != old_hash: if deploy: click.echo( crayons.red( "Your Pipfile.lock ({0}) is out of date. 
Expected: ({1}).".format( old_hash[-6:], new_hash[-6:] ) ) ) click.echo(crayons.normal("Aborting deploy.", bold=True), err=True) requirements_dir.cleanup() sys.exit(1) elif (system or allow_global) and not (PIPENV_VIRTUALENV): click.echo( crayons.red( u"Pipfile.lock ({0}) out of date, but installation " u"uses {1}… re-building lockfile must happen in " u"isolation. Please rebuild lockfile in a virtualenv. " u"Continuing anyway…".format( crayons.white(old_hash[-6:]), crayons.white("--system") ), bold=True, ), err=True, ) else: if old_hash: msg = u"Pipfile.lock ({1}) out of date, updating to ({0})…" else: msg = u"Pipfile.lock is corrupted, replaced with ({0})…" click.echo( crayons.red(msg.format(old_hash[-6:], new_hash[-6:]), bold=True), err=True, ) do_lock( system=system, pre=pre, keep_outdated=keep_outdated, write=True, pypi_mirror=pypi_mirror, ) # Write out the lockfile if it doesn't exist. if not project.lockfile_exists and not skip_lock: # Unless we're in a virtualenv not managed by pipenv, abort if we're # using the system's python. if (system or allow_global) and not (PIPENV_VIRTUALENV): click.echo( "{0}: --system is intended to be used for Pipfile installation, " "not installation of specific packages. Aborting.".format( crayons.red("Warning", bold=True) ), err=True, ) click.echo("See also: --deploy flag.", err=True) requirements_dir.cleanup() sys.exit(1) else: click.echo( crayons.normal(u"Pipfile.lock not found, creating…", bold=True), err=True, ) do_lock( system=system, pre=pre, keep_outdated=keep_outdated, write=True, pypi_mirror=pypi_mirror, ) do_install_dependencies( dev=dev, requirements=requirements, allow_global=allow_global, skip_lock=skip_lock, concurrent=concurrent, requirements_dir=requirements_dir.name, pypi_mirror=pypi_mirror, ) if cleanup_reqdir: requirements_dir.cleanup() # Hint the user what to do to activate the virtualenv. 
if not allow_global and not deploy and "PIPENV_ACTIVE" not in os.environ: click.echo( "To activate this project's virtualenv, run {0}.\n" "Alternatively, run a command " "inside the virtualenv with {1}.".format( crayons.red("pipenv shell"), crayons.red("pipenv run") ) ) def pip_install( requirement=None, r=None, allow_global=False, ignore_hashes=False, no_deps=True, block=True, index=None, pre=False, selective_upgrade=False, requirements_dir=None, extra_indexes=None, pypi_mirror=None, trusted_hosts=None ): from notpip._internal import logger as piplogger src = [] if not trusted_hosts: trusted_hosts = [] trusted_hosts.extend(os.environ.get("PIP_TRUSTED_HOSTS", [])) if environments.is_verbose(): piplogger.setLevel(logging.INFO) if requirement: click.echo( crayons.normal("Installing {0!r}".format(requirement.name), bold=True), err=True, ) # Create files for hash mode. if requirement and not requirement.editable and (not ignore_hashes) and (r is None): fd, r = tempfile.mkstemp( prefix="pipenv-", suffix="-requirement.txt", dir=requirements_dir ) with os.fdopen(fd, "w") as f: f.write(requirement.as_line()) # Install dependencies when a package is a VCS dependency. if requirement and requirement.vcs: no_deps = False # Don't specify a source directory when using --system. if not allow_global and ("PIP_SRC" not in os.environ): src.extend(["--src", "{0}".format(project.virtualenv_src_location)]) # Try installing for each source in project.sources. 
if index: try: index_source = project.find_source(index) index_source = index_source.copy() except SourceNotFound: src_name = project.src_name_from_url(index) verify_ssl = True if index not in trusted_hosts else False index_source = {"url": index, "verify_ssl": verify_ssl, "name": src_name} sources = [index_source.copy(),] if extra_indexes: if isinstance(extra_indexes, six.string_types): extra_indexes = [extra_indexes,] for idx in extra_indexes: try: extra_src = project.find_source(idx) except SourceNotFound: src_name = project.src_name_from_url(idx) verify_ssl = True if idx not in trusted_hosts else False extra_src = {"url": idx, "verify_ssl": verify_ssl, "name": extra_src} if extra_src["url"] != index_source["url"]: sources.append(extra_src) else: for idx in project.pipfile_sources: if idx["url"] != sources[0]["url"]: sources.append(idx) else: sources = project.pipfile_sources if pypi_mirror: sources = [ create_mirror_source(pypi_mirror) if is_pypi_url(source["url"]) else source for source in sources ] if (requirement and requirement.editable) or not r: install_reqs = requirement.as_line(as_list=True) if requirement.editable and install_reqs[0].startswith("-e "): req, install_reqs = install_reqs[0], install_reqs[1:] editable_opt, req = req.split(" ", 1) install_reqs = [editable_opt, req] + install_reqs if not any(item.startswith("--hash") for item in install_reqs): ignore_hashes = True else: install_reqs = ["-r", r] with open(r) as f: if "--hash" not in f.read(): ignore_hashes = True # trusted_hosts = [ # "--trusted-host={0}".format(source.get("url")) for source in sources
<gh_stars>0 # -*- coding: utf-8 -*- """ Created on Mon Apr 1 10:51:23 2019 @author: Jarvis Functions """ import glob #used in to read in all the fles import pandas as pd import numpy as np from datetime import datetime# from datetime import timedelta import glob #used in to read in all the fles from scipy import stats import matplotlib import seaborn as sns from sklearn.linear_model import LinearRegression import GRIMM as GM import matplotlib.pylab as plt import codecs import AQMapfunctions as AQMap def droperror(data,col,limit,condition): ''' Error data cutter fuction, does not cut all the other data but set that columns error to None values ''' print("-------------------Cutting data", col, limit, condition) print(max(data[col])) if condition =="greater": indexNames = data[ data[col] > limit ].index data.drop(indexNames , inplace=True) #mask=data[col]>int(limit) elif condition =="less": indexNames = data[ data[col] < limit ].index data[col].drop(indexNames , inplace=True) # data.loc[mask,col]=None print(max(data[col])) return data def gencount(Data): """ does not work """ cols=[] for col in Data.columns: if "b" in col or "um" in col: #OPC bin data is b0 b1 b2 ... GRIMM bin data is 0.3um 0.5um .... 
# print(col) # Data[col].fillna(0,inplace=True) if col != "checksum": cols.append(col) print("Generate Total Partile count") print(cols) Data["ParticleCount"]=Data[cols].sum(axis=1) print(Data["ParticleCount"]) return Data def genratio(Data,col1,col2): rationame=col1+"VS"+col2 Data[rationame]=Data[col1]/Data[col2] return Data def GetDataset(Folder,sensors,ave): Data={}#set array to hold file names # folder=Folder #import data sent for sensor in sensors: # print(sensor) sfiles=[] for file in glob.glob(Folder+'***.csv'): # print(file) if sensor in file: sfiles.append(file) #1 print(file) sfiles=sorted(sfiles) # print(len(sfiles)) print(sfiles) data=pd.DataFrame() if len(sfiles)==1: with codecs.open(sfiles[0], "br",encoding="utf8", errors='ignore') as test: #print(test) row="" for i, row in enumerate(test): if "time" in row: print(i,row) header=i if "GRIMM" in sfiles[0]: #account for GRIMM data header=1 data=pd.read_csv(sfiles[0],header=header,error_bad_lines=False,engine='python') else: data=pd.read_csv(sfiles[0],header=header,error_bad_lines=False,engine='python') if "SDS" in sensor: data=data.loc[:,"time":"sds-pm10"] else: data.rename(columns={"pm2":"pm2.5","RH":"OPC-RH","T":"OPC-T","b24":"cut"},inplace=True) else: for file in sfiles: with codecs.open(file, "br",encoding="utf8", errors='ignore') as test: # print(test) for i, row in enumerate(test): if "time" in row: # print(i,row) header=i if "GRIMM" in file: dataloop=pd.read_csv(file,header=header,error_bad_lines=False,engine='python') else: dataloop=pd.read_csv(file,header=header,error_bad_lines=False,engine='python') if "SDS" in sensor: dataloop=dataloop.loc[:,"time":"sds-pm10"] else: dataloop.rename(columns={"pm2":"pm2.5","RH":"OPC-RH","Temp":"OPC-T","b24":"cut"},inplace=True) data=pd.concat([data,dataloop], ignore_index=False, axis=0,sort=True) #print(data.columns) if "GRIMM" not in sensor: split=sfiles[0].split("_") file=sfiles[0] sen="" #varable place holder if "OPCN3" in file: loc=file.find("OPCN3_") elif 
"SDS" in split[2]: loc=file.find("SDS011") elif "OPCN2" in split[2]: loc=file.find("OPCN2_") #find location Loc=file.find("AQ") Loc=Loc-1 Loc=file[len(Folder)-1:Loc] if "GPS" in file: Loc=Loc+"-GPS" sen=Loc+":"+sensor print(data.head(4)) data["time"]=pd.to_datetime(data.time) data.set_index('time', inplace=True) print("---------------"+sen+"-----------------------------") print(data.columns) #deal with non float varaible types for k,c in data.iteritems(): typ=str(c.dtype) if "float" not in typ: # print(k) data[k]=pd.to_numeric(data[k], errors='coerce') data[k]=data[k].astype('float64') data = data.loc[~data.index.duplicated(keep='first')] #drop error data for col in data.columns : if "pm" in col: try: data=droperror(data,col,1000,"greater") data=droperror(data,col,0,"less") except: pass elif col=="DHT-RH": print(col) data=droperror(data,col,100,"greater") # data=droperror(data,col,0,"less") if "SDS" in sensor: data=genratio(data,"sds-pm10","sds-pm2.5") #gen pm10/pm2.5 else: data=genratio(data,"pm10","pm2.5") #gen pm10/pm2.5 data=genratio(data,"pm2.5","pm1") #gen pm2.5/pm1 data=gencount(data) #generate calibrated RH and T based on DHT22 if "OPCN3" in sensor: if "DHT-RH" in data.columns: data=VariableCalPlot(data,"OPC-RH","DHT-RH",sen) data=VariableCalPlot(data,"OPC-T","DHT-T",sen) else: sen=sensor GRIMM1108size = [0.3,0.4,0.5,0.65,0.8,1,1.6,2,3,4,5,7.5,10,15,20] print(data) data["time"]=pd.to_datetime(data.time) data.time=data.time+timedelta(hours=1) #account for log being in UTC data.set_index('time', inplace=True, drop=True) data=GM.binmass(data,GRIMM1108size) data=gencount(data) data=genratio(data,"pm10","pm2") #gen pm10/pm2.5 data=genratio(data,"pm2","pm1") #gen pm2.5/pm1 #print(sen) if ave != "RAW": #If there is a avearege then get mean, if RAW dont take mean #print(data.dtypes) for k,c in data.iteritems(): typ=str(c.dtype) if "float" not in typ: # print(k) data[k]=pd.to_numeric(data[k], errors='coerce') data[k]=data[k].astype('float64') print(data.dtypes) 
data=data.resample(ave).mean() # print(data.columns) # print(data) Data[sen]=data return Data ############################################################################## ############################################################################## #################### def gencsv(Datadic,Location): """ Generate large data csv from data dictionay for sensors #Location=["JIMSOffice", "Lat-Lon", "53.805781", "-1.555851"] Current: SDS, OPCN2,OPCN3 """ #Cirrenty can only get one extra header, what i though adding the Sensors location for k ,v in Datadic.items(): print(k) #csv file name #Add timeperiod start=pd.to_datetime(min(v.index)) end=pd.to_datetime(max(v.index)) starttime=start.strftime("%Y%m%d") endtime=end.strftime("%Y%m%d") name="Dataset//"+Location[0]+'_'+k+'_'+starttime+'_'+endtime+'.csv' #Generate data info with open(name, "w+") as f: #Add timeperiod starttime=start.strftime("%Y-%m-%d %H:%M:%S") endtime=end.strftime("%Y-%m-%d %H:%M:%S") Time='Time Period,start:, '+starttime+',end:,'+endtime print(Time,file=f) #add sensors print('Sensor:,'+k,file=f) #add location Loc='Location:,'+Location[0]+',Lat-Lon,'+Location[2]+","+Location[3] print(Loc,file=f) #add interval time print("Interval time,10",file=f) #add data lenght Len='Data points,'+str(len(v))+',Days of Data,'+str((end-start).days) print(Len,file=f) f.close() #Append dic data with open(name, "a") as f: v.to_csv (f, index = True, header=True) f.close() print("--------------------") def dateparse (timestamp): time=pd.datetime.strptime(timestamp, '%Y-%m-%d %H:%M') return time def GetRPIdataV2(Folder,RPI,ave): ''' Function to get the all OPC Data from RPI and put it in a directory, Allowing further use and plotting. 
Need Folder location for data The RPI name in the CSV file ave- the averageing what wanted to be applied ave: 5T - 5 mim, 30T - 30 min, RAW - raw data no avarage 16/04/2019 -removes nan data, and deals with resample issue -find sen in the first line of CSV ''' #Get files sfiles = [] #set array to hold file names # folder=Folder for file in glob.glob(Folder+'***.csv'): if RPI in file: sfiles.append(file) # print(sfiles) sfiles=sorted(sfiles) #does them in a random order , so need to be sorted #Read the data into a panda array # print(sfiles) df=pd.DataFrame() sen=[] for loc in sfiles: #read in data dataloop=pd.read_csv(loc,skiprows=[1],error_bad_lines=False,parse_dates=True, date_parser=dateparse) sen=pd.read_csv(loc,skiprows=1, nrows=1,header=None) # print(dataloop.head(4)) #cut out error sen in first line sen=sen.T[1:len(sen.T)].as_matrix() sen=sen[:,0] sen=sen[pd.notnull(sen)] #return sen #re read csv but get the sensor info # try: #except: # pass # print(dataloop.columns) df=pd.concat([df,dataloop], ignore_index=True, axis=0) # print(df.head(4)) #print("sen",sen) df.set_index('time', inplace=True, drop=True) d={} OPC=0 OPCN3cols='b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10', 'b11', 'b12', 'b13', 'b14', 'b15', 'b16','b17', 'b18', 'b19', 'b20', 'b21', 'b22', 'b23', 'cut', 'period','FlowRate', 'Temp', 'RH', 'pm1', 'pm2', 'pm10', 'Check' OPCN2cols='b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10', 'b11', 'b12', 'b13', 'b14', 'b15','ct','FlowRate', 'temp_pressure','period', 'Check','pm1', 'pm2', 'pm10' # print(df.head(4)) #pd.DataFrame() for I,S in enumerate(sen): #print(I,S,sen[len(sen)-1]) #Set the Temp into its own array if "DHT" in S: # print("DHTcheck",len(df["RH"]),len(df["T"])) #gt data #df = df.reset_index() d[S]=pd.concat([df["RH"],df["T"]],axis=1) #d[S].columns=["RH","T"] # print(d[S].head(4)) # d[S]=pd.DataFrame(data=[df["RH"],df["T"]]) #transpose it d[S]=d[S] # d[S]=d[S].drop(d[S][d[S]["RH"]==None].index) 
#d[S]=d[S].drop(d[S][d[S]["RH"]>100].index) #make usre the index name is correct d[S].index.names = ['time'] #deal with None data d[S].replace('None', np.nan, inplace=True) d[S]=d[S][d[S]["RH"].notnull()] d[S]=d[S][d[S]["T"].notnull()] # print(d[S].head(4)) elif "OPC" in S: #find sensors index ScolInd=df.columns.get_loc(S) #if just one OPC or the last out of all OPC running get data if I == len(sen)-1: data=df.iloc[:,ScolInd:len(df.columns)] else: #If not the Last OPC, get data between the two sensors #Define the end index ScolInd2=df.columns.get_loc(sen[I+1]) data=df.iloc[:,ScolInd:ScolInd2] # print(data.head(3)) d[S]=pd.DataFrame(data) # print(S,d[S].columns) #add a colums name if "OPCN3" in S: sencols=("Name",)+OPCN3cols #set a name colums# elif "OPCN2" in S: sencols=("Name",)+OPCN2cols #set a name colums# d[S].columns=sencols #update the colums names to deal with the b1.1 or b1.2 for multiple sensors #cut the name colums as it get in the way of the avarage, and no longer needed #d[S]=d[S].rename(S) d[S].drop("Name",axis=1) #make shure the time index is named correctly d[S].index.names = ['time'] d[S]=d[S][d[S].notnull()] OPC=OPC+1 # pd.concat(d, axis=1) # print("issue",d[S].head(30)) #avarage the data if desired, if not type RAW if ave != "RAW": try: #mean data based on the set resample length d[S].index=pd.to_datetime(d[S].index) d[S]=d[S].resample(ave).mean() except: #deal with a data error what comes about when the None data is cut from the DHT22 d[S]=d[S].astype(float) d[S]=d[S].resample(ave).mean() # print("d",d) return d ,sen def GetRPIdataV1(Folder,RPI,ave,sen): ''' Function to get the all OPC Data from RPI and put it in a directory, Allowing further use and plotting. 
Need Folder location for data The RPI name in the CSV file ave- the averageing what wanted to be applied ave: 5T - 5 mim, 30T - 30 min, RAW - raw data no avarage #Created :11/04/2019 <NAME> For the first calibration test with the data looking like "JimsOffice_AQRPI5_(,DHT22_3,OPCN3_6,OPCN3_N1)_20190410.csv " This format is no longer used, as the CMAC people compalined, even though they wanted all the info in the heading in the first place. To account for this the sen varable was added, not futer version of the code will have an featuer to fins sen in the file. ''' #Get files sfiles = [] #set
<reponame>00sapo/OpenEWLD<gh_stars>1-10 import time import argparse import csv import json import operator import os import sys import traceback import zipfile import sqlite3 from collections import defaultdict from typing import List, Dict import discogs_client import requests from music21 import converter, stream, note, chord, text, musicxml, features, key, harmony def detectGenres(query: str, depth: int, num_of_items: int, client: discogs_client.Client)-> List: """ detect genres using discogs client. :depth: number of song to be used :num_of_items: number of items in the list returned :d: discogs client object :returns: list of list of tuples: [ [(genre1, occurrences), (genre2, occurrences), ...], [(style1, occurrences), (style2, occurrences), ...] ] """ r = client.search(query, type='release') r = r.sort('year') # populate genres_stats list genres_stats = defaultdict(int) styles_stats = defaultdict(int) r.per_page = depth l = r.page(1) for release in l: genres = release.fetch('genre') if genres is not None: for k in genres: genres_stats[k] += 1 if release.styles is not None: for k in release.styles: styles_stats[k] += 1 genres = [] styles = [] if len(genres_stats) > 0: twoMostCommon(num_of_items, genres_stats, genres) if len(styles_stats) > 0: twoMostCommon(num_of_items, styles_stats, styles) return [genres, styles] def twoMostCommon(num_of_items, dictionary, listOfTuples): for i in range(num_of_items): if i < len(dictionary): most_common_tuple = max(dictionary.items(), key=operator.itemgetter(1)) dictionary.pop(most_common_tuple[0]) listOfTuples.append(most_common_tuple) def getComposerInfoByUri(uri: str) -> Dict: """:returns: same as @getComposerInfoByName""" r = requests.get(uri, params={'format': 'json'}) data = json.loads(r.text) if checkingErrors(data): return getComposerInfoByUri(uri) composer = { 'correct_name': data.get('commonName'), 'home_country': data.get('homeCountry'), 'birth': formatDate(data.get('birthDate')), 'death': 
formatDate(data.get('deathDate')) } return composer def getComposerInfoByName(name: str) -> Dict: """ :retuns: a dictionary containing the birth date, the death date, the actual name and the nationality of the composer """ url = 'https://secondhandsongs.com/search/artist' params = { 'format': 'json', 'commonName': name } resp = requests.get(url=url, params=params) data = json.loads(resp.text) if checkingErrors(data): return getComposerInfoByName(name) if len(data.get('resultPage') or '') == 0: return None artist_page = data['resultPage'][0]['uri'] return getComposerInfoByUri(artist_page) def formatDate(date) -> str: if date is None: return None tokens = str(date).split('-', 3) returned = '' if len(tokens) == 1: returned = tokens[0] + '-00-00' elif len(tokens) == 2: returned = tokens[0] + '-' + tokens[1] + '-00' else: returned = tokens[0] + '-' + tokens[1] + '-' + tokens[2] return returned def getWorkInfo(title: str, composer: str) -> Dict: """ :returns: a dict with work info """ url = 'https://secondhandsongs.com/search/work' params = { 'format': 'json', 'credits': composer, 'title': title } resp = requests.get(url=url, params=params) data = json.loads(resp.text) if checkingErrors(data): return getWorkInfo(title, composer) if len(data.get('resultPage') or '') == 0: return None work_page = data['resultPage'][0]['uri'] r = requests.get(work_page, params={'format': 'json'}) data = json.loads(r.text) if checkingErrors(data): return getWorkInfo(title, composer) all_authors = [] for i in data.get('credits'): all_authors.append(i.get('uri')) work = { 'language': data.get('language'), 'correct_title': data.get('title'), 'correct_credits_uri': all_authors, } if data.get('original') is not None: original_performance_page = data['original'].get('uri') r = requests.get(original_performance_page, params={'format': 'json'}) data = json.loads(r.text) if checkingErrors(data): return getWorkInfo(title, composer) work['original_performance_date'] = formatDate(data.get('date')) else: 
work['original_performance_date'] = None return work def getTonality(score: stream.Score)-> key.Key: """ :returns: a key.Key object representing tonality detected by Krumhanslschumckler algorithm, only if its 'tonalCertainty()' is >= 0.9, None otherwise """ try: estimated = score.analyze('key.krumhanslschmuckler') except Exception: return None if estimated.tonalCertainty() < 0.9: return None else: return estimated def scoreIsCompatible(s: stream.Score) -> bool: """ parse a s and returs True if it is compatible with our symbolic representation system This also sets the 'timeSignature', 'keySignature', 'incipitType' and 'hasTriplets' in compatible stream.Score objects """ # no multiple voices are allowed print('checking compatibility...') sc = s.explode() # only one part is allowed print('\tchecking parts (only one allowed)...') if hasattr(sc, 'parts'): if len(sc.parts) > 1: return False # only one key signature is allowed print('\tchecking key signatures (only one allowed)...') signatures = s.flat.getKeySignatures() if len(signatures) > 1: for signature in signatures: if signature.asKey().name != signatures[0].asKey().name: return False # looking for the right tonality estimated = getTonality(s) if estimated is not None: s.keySignature = estimated.asKey() elif len(signatures) == 0: return False else: s.keySignature = signatures[0].asKey() # only one time signature is allowed print('\tchecking time signatures (only one allowed)...') signatures = s.flat.getTimeSignatures() if len(signatures) > 1: for signature in signatures: if signature.ratioString != signatures[0].ratioString: return False elif len(signatures) == 0: return False measure_length = signatures[0].numerator / signatures[0].denominator * 4 s.timeSignature = signatures[0] # no multiple white measures in incipit # setting incipit type print('\tchecking no multiple white measures at the beginning...') for m in s.recurse().getElementsByClass(stream.Measure): if len(m.recurse().getElementsByClass(note.Note)) 
== 0: m.containerHierarchy()[0].remove(m) else: n = m.recurse().getElementsByClass(note.Note)[0] if n.offset > 0: s.incipitType = 'acefalo' elif m.duration.quarterLength < measure_length: s.incipitType = 'anacrusi' else: s.incipitType = 'tetico' break # no multiple white measures at the end print('\tchecking no multiple white measures at the end...') it = s.recurse().getElementsByClass(stream.Measure) for m in reversed(it): if len(m.recurse().getElementsByClass(note.Note)) == 0: m.containerHierarchy()[0].remove(m) else: break print('\tchecking triplets and chords...') s.hasTriplets = False noChordSymbol = True it = s.flat.notesAndRests i = 0 while i < len(it): n = it[i] if type(n) is harmony.ChordSymbol: noChordSymbol = False i += 1 continue # no written chords allowed if type(n) is chord.Chord: print('----Chords are not allowed----') return False # triplets checking: if len(n.duration.tuplets) > 0: tuplet = n.duration.tuplets[0] # only triplets are allowed if tuplet.numberNotesActual > 3: print('----Only triplets are allowed----') return False # the following is to check the nesting level if tuplet.nestedLevel > 1: print('----only one nested level is allowed----') return False # only if it is contained in one measure if tuplet.totalTupletLength() > measure_length: print('----tuplets are allowed only in the same measure----') return False s.hasTriplets = True i += 3 else: i += 1 if noChordSymbol: print('----No chords annotated----') return False return True def copyToDir(s: stream.Score, dir: str): path = os.path.join(dir, s.metadata.composer + '-' + s.metadata.title + '.xml') s.write(fp=path) def fixStrangeCharacters(title, composer): composer = composer.translate( {ord(c): " " for c in "!@#$%^&*()[]{};:,./<>?\|~-=_+"}) composer = composer.translate( {ord(c): "'" for c in "`"}) title = title.translate( {ord(c): " " for c in "@#$%^&*()[]{};:./<>\|~-=_+"}) title = title.translate( {ord(c): "'" for c in "`"}) return title, composer def writeCompressedMxl(xml: str, 
filename_without_extension: str, filepath_without_extension: str): zf = zipfile.ZipFile(filepath_without_extension + '.mxl', mode='w', compression=zipfile.ZIP_DEFLATED) zi = zipfile.ZipInfo('META-INF' + os.sep + 'container.xml') zi.external_attr = 0o660 << 16 zf.writestr(zi, "<?xml version='1.0' encoding='UTF-8'?>" "<container><rootfiles><rootfile full-path='{0}.xml'/>" "</rootfiles></container>".format(filename_without_extension)) zi = zipfile.ZipInfo(filename_without_extension + '.xml') zi.compress_type = zipfile.ZIP_DEFLATED zi.external_attr = 0o660 << 16 zf.writestr(zi, xml) zf.close() def checkingErrors(response: Dict): # checking errors error = response.get('error') if error is not None: if error.get('code') == 10007: print('too many requests... wait a bit and retry') time.sleep(30) return True return False def secondHandSongsInfo(s: stream.Score): """ Queries secondhandsongs.com to gather work and composers info :returns: a list containing work and composers dictionaries """ title = s.metadata.title composer = s.metadata.composer if title == '' or title is None or composer == '' or composer is None: return {}, [] # removing strange characters title, composer = fixStrangeCharacters(title, composer) # trying to get work info print('querying secondhandsongs.com for work and artists info...') work = getWorkInfo(title, composer) if work is None: author = getComposerInfoByName(composer) if author is not None: work = getWorkInfo(title, author) if work is None: work = getWorkInfo(title, composer.split(None, 1)[0]) if work is None: work = getWorkInfo(title, '') if work is None: return {}, [] # trying to get composers info composers = [] for uri in work.get('correct_credits_uri'): composers.append(getComposerInfoByUri(uri)) return work, composers def collectData(s: stream.Score, new_dataset_dir: str, id: int, filename: str): """ :returns: a dictionary containing 'name of table': 'entry as tuple' or None if it is untreatable """ # collecting data print('collecting 
data...') work, composers = secondHandSongsInfo(s) if 'correct_title' in work and len(composers) > 0: # getting genres and styles print('querying discogs for genre detection...') discogs_query = work.get('correct_title') or '' for c in composers: correct_name = c.get('correct_name') or '' discogs_query += ' ' + correct_name if discogs_query == '': genres = styles = [] else: genres, styles = detectGenres( discogs_query, depth=5, num_of_items=2, client=d) else: genres = styles = [] composers.append({'correct_name': '[Unknown]'}) if s.metadata.title != '': work['correct_title'] = s.metadata.title else: work['correct_title'] = filename.split( '_-_', 1)[-1].replace('_', ' ') # lyrics print('writing lyrics and leadsheet...') lyrics = text.assembleAllLyrics(s).replace('\n', '') # computing file name output_dir = '' for c in composers: correct_name = c.get('correct_name') or '' output_dir += correct_name + '-' correct_title = work.get( 'correct_title').replace(' ', '_').replace('/', '-') output_dir =
# Copyright (c) 2009, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from . import console from . import edit from . import extension from . import panel from . import screen from . import setting from . import status from . import util from . import virtual from . import visual from . import void from . 
import window BUILD_FAILED = -1 BUILD_RETRY = -2 class Workspace (object): def __init__(self, bpl): self.__bpl = 1 self.set_bytes_per_line(bpl) assert self.__bpl >= 1, self.__bpl self.__windows = () self.__fileopss = [] self.__consoles = [] self.__vwindows = {} self.__bwindows = {} self.__twindows = {} self.__swindows = {} self.__cur_fileops = None self.__cur_console = None self.__set_window(None) self.__def_vwindow, self.__def_bwindow, self.__def_twindow, \ self.__def_swindow = self.__windows def __getattr__(self, name): if name == "_Workspace__cur_fileops": raise AttributeError(name) return getattr(self.__cur_fileops, name) def __len__(self): return len(self.__windows) def dispatch(self): return -1 def __set_buffer(self, o): self.__cur_fileops = o self.set_console(None, None) def set_console(self, con, arg): if isinstance(con, console.Console): self.__cur_console = con else: i = self.__fileopss.index(self.__cur_fileops) self.__cur_console = self.__consoles[i] self.__set_window(self.__cur_console) for o in self.__windows: if o: o.set_buffer(self.__cur_fileops) def dispatch(): self.require_full_repaint() return self.__cur_console.dispatch(arg) self.dispatch = dispatch def __set_window(self, con): # virtual window comes first if isinstance(con, console.Console): cls = util.get_class(con) else: cls = console.get_default_class() self.__windows = \ self.__get_virtual_window(cls), \ self.__get_binary_window(cls), \ self.__get_text_window(cls), \ self.__get_status_window(cls) def disconnect_window(self): # only leave virtual window to disable repaint self.__windows = self.__windows[0], def reconnect_window(self): # bring back windows of current console self.__set_window(self.__cur_console) def clone(self): ret = Workspace(self.__bpl) for i, o in enumerate(self.__fileopss): # do shallow copy, but don't use copy.copy() ret.add_buffer(i, o.clone(), self.__consoles[i]) return ret def add_buffer(self, i, fop, con): self.__fileopss.insert(i, fop) self.__consoles.insert(i, con) 
if len(self.__fileopss) == 1: self.__set_buffer(fop) def remove_buffer(self, i): o = self.__fileopss[i] i = self.__fileopss.index(o) if self.__cur_fileops == o: if i == len(self.__fileopss) - 1: self.switch_to_prev_buffer() else: self.switch_to_next_buffer() self.__fileopss.remove(self.__fileopss[i]) self.__consoles.remove(self.__consoles[i]) def switch_to_buffer(self, i): self.__set_buffer(self.__fileopss[i]) def switch_to_first_buffer(self): self.__set_buffer(self.__fileopss[0]) def switch_to_last_buffer(self): self.__set_buffer(self.__fileopss[-1]) def switch_to_next_buffer(self): if len(self.__fileopss) > 1: i = self.__fileopss.index(self.__cur_fileops) if i >= len(self.__fileopss) - 1: o = self.__fileopss[0] else: o = self.__fileopss[i + 1] self.__set_buffer(o) def switch_to_prev_buffer(self): if len(self.__fileopss) > 1: i = self.__fileopss.index(self.__cur_fileops) if i <= 0: o = self.__fileopss[len(self.__fileopss) - 1] else: o = self.__fileopss[i - 1] self.__set_buffer(o) def iter_buffer(self): for o in self.__fileopss: yield o def get_build_size(self): return self.get_height(), self.get_width() def get_height(self): bin_hei = self.__def_bwindow.get_size_y() tex_hei = self.__def_twindow.get_size_y() if setting.use_text_window \ else bin_hei sta_hei = self.__def_swindow.get_size_y() sta_hei_ = self.__get_status_window_height() # screen size may change before build() while resizing if not screen.test_soft_resize(): assert bin_hei == tex_hei, (bin_hei, tex_hei) assert sta_hei == sta_hei_, (sta_hei, sta_hei_) return bin_hei + sta_hei def get_width(self): bin_wid = self.__def_bwindow.get_size_x() tex_wid = self.__def_twindow.get_size_x() if setting.use_text_window \ else 0 sta_wid = self.__def_swindow.get_size_x() ret = bin_wid + tex_wid # screen size may change before build() while resizing if not screen.test_soft_resize(): assert ret == sta_wid, (bin_wid, tex_wid, sta_wid) return ret def __get_status_window_class(self): return 
status.get_status_canvas_class(), status.get_status_frame_class() def __get_status_window_height(self): return window.get_status_window_height( *self.__get_status_window_class()) # horizontal split def build_dryrun_delta(self, hei_delta, beg_delta): hei = self.__def_bwindow.get_size_y() + self.__def_swindow.get_size_y() beg = self.__def_bwindow.get_position_y() return self.build_dryrun(hei + hei_delta, beg + beg_delta) def build_dryrun(self, hei, beg): sta_hei = self.__get_status_window_height() min_hei = window.get_min_binary_window_height() + sta_hei return self.__test_build(hei, beg, min_hei) def build(self, hei, beg): sta_hei = self.__get_status_window_height() self.__build(hei, beg, sta_hei) return hei def build_fixed_size_dryrun(self, lpw, beg): sta_hei = self.__get_status_window_height() hei = window.get_min_binary_window_height(lpw) + sta_hei min_hei = window.get_min_binary_window_height() + sta_hei return self.__test_build(hei, beg, min_hei) def build_fixed_size(self, lpw, beg): sta_hei = self.__get_status_window_height() hei = window.get_min_binary_window_height(lpw) + sta_hei self.__build(hei, beg, sta_hei) return hei def __test_build(self, hei, beg, min_hei): scr_hei = screen.get_size_y() scr_wid = screen.get_size_x() if hei <= 0: return BUILD_FAILED if beg < 0: return BUILD_FAILED if scr_hei < min_hei: # screen height < minimum workspace height return BUILD_FAILED if hei < min_hei: # height < minimum workspace height return BUILD_FAILED if hei > scr_hei: # height > screen height return BUILD_FAILED if beg + hei >= scr_hei: # this workspace exceeds screen size return BUILD_FAILED if self.guess_width() > scr_wid: # test width return BUILD_FAILED return hei def __build(self, hei, beg, sta_hei): def vfn(o): siz = hei - sta_hei, self.__guess_virtual_window_width() pos = beg, 0 self.__build_window(o, siz, pos) def bfn(o): siz = hei - sta_hei, self.__guess_binary_window_width() pos = beg, 0 self.__build_window(o, siz, pos) def tfn(o): siz = hei - sta_hei, 
self.__guess_text_window_width() pos = beg, self.__def_bwindow.get_size_x() self.__build_window(o, siz, pos) def sfn(o): siz = sta_hei, self.guess_width() pos = beg + self.__def_bwindow.get_size_y(), 0 self.__build_window(o, siz, pos) self.__do_build(vfn, bfn, tfn, sfn) # vertical split def vbuild_dryrun(self, beg): sta_hei = self.__get_status_window_height() hei = console.get_position_y() min_hei = window.get_min_binary_window_height() + sta_hei return self.__test_vbuild(hei, beg, min_hei) def vbuild(self, beg): sta_hei = self.__get_status_window_height() hei = console.get_position_y() self.__vbuild(hei, beg, sta_hei) return beg def vbuild_fixed_size_dryrun(self, lpw, beg): sta_hei = self.__get_status_window_height() hei = window.get_min_binary_window_height(lpw) + sta_hei min_hei = window.get_min_binary_window_height() + sta_hei return self.__test_vbuild(hei, beg, min_hei) def vbuild_fixed_size(self, lpw, beg): sta_hei = self.__get_status_window_height() hei = window.get_min_binary_window_height(lpw) + sta_hei self.__vbuild(hei, beg, sta_hei) return beg def __test_vbuild(self, hei, beg, min_hei): scr_hei = screen.get_size_y() scr_wid = screen.get_size_x() if hei <= 0: return BUILD_FAILED if beg < 0: return BUILD_FAILED if scr_hei < min_hei: # screen height < minimum workspace height return BUILD_FAILED if hei < min_hei: # height < minimum workspace height return BUILD_FAILED if hei > scr_hei: # height > screen height (redundant) return BUILD_FAILED if hei >= scr_hei: # this workspace exceeds screen size return BUILD_FAILED if beg + self.guess_width() > scr_wid: # test width return BUILD_FAILED return hei def __vbuild(self, hei, beg, sta_hei): def vfn(o): siz = hei - sta_hei, self.__guess_virtual_window_width() pos = 0, beg self.__build_window(o, siz, pos) def bfn(o): siz = hei - sta_hei, self.__guess_binary_window_width() pos = 0, beg self.__build_window(o, siz, pos) def tfn(o): siz = hei - sta_hei, self.__guess_text_window_width() pos = 0, beg + 
self.__def_bwindow.get_size_x() self.__build_window(o, siz, pos) def sfn(o): siz = sta_hei, self.guess_width() pos = self.__def_bwindow.get_size_y(), beg self.__build_window(o, siz, pos) self.__do_build(vfn, bfn, tfn, sfn) # common for both horizontal and vertical split def __do_build(self, vfn, bfn, tfn, sfn): self.__build_virtual_window = vfn self.__build_binary_window = bfn self.__build_text_window = tfn self.__build_status_window = sfn # update first for potential window parameter changes # (if yes, dryrun must have been done with new parameters) for o in self.__vwindows.values(): if o: o.update() for o in self.__bwindows.values(): if o: o.update() for o in self.__twindows.values(): if o: o.update() for o in self.__swindows.values(): if o: o.update() for o in self.__vwindows.values(): self.__build_virtual_window(o) for o in self.__bwindows.values(): self.__build_binary_window(o) for o in self.__twindows.values(): self.__build_text_window(o) for o in self.__swindows.values(): self.__build_status_window(o) bin_hei = self.__def_bwindow.get_size_y() tex_hei = self.__def_twindow.get_size_y() if setting.use_text_window \ else bin_hei assert bin_hei == tex_hei, (bin_hei, tex_hei) bin_wid = self.__def_bwindow.get_size_x() tex_wid = self.__def_twindow.get_size_x() if setting.use_text_window \ else 0 sta_wid = self.__def_swindow.get_size_x() assert bin_wid + tex_wid == sta_wid, (bin_wid, tex_wid, sta_wid) def __build_virtual_window(self, o): self.__build_window(o, None, None) def __build_binary_window(self, o): self.__build_window(o, None, None) def __build_text_window(self, o): self.__build_window(o, None, None) def __build_status_window(self, o): self.__build_window(o, None, None) # Note that update_capacity() can't be called from window.Window.__init__, # because initial panel size hasn't been set yet, while get_bufmap() needs # panel size to update bufmap. def
"longer-side": "keep-aspect-ratio"}) resizes all images to a height/width of 224 pixels depending on which axis is shorter and resizes the other axis so that the aspect ratio is maintained. >>> aug = iaa.Resize({"height": (0.5, 0.75), "width": [16, 32, 64]}) resizes all images to a height of ``H*v``, where ``H`` is the original height and v is a random value sampled from the range ``0.5<=x<=0.75``. The width/x-axis of each image is resized to either 16 or 32 or 64 pixels. >>> aug = iaa.Resize(32, interpolation=["linear", "cubic"]) resizes all images to ``32x32`` pixels. Randomly uses either ``linear`` or ``cubic`` interpolation. """ def __init__(self, size, interpolation="cubic", name=None, deterministic=False, random_state=None): super(Resize, self).__init__(name=name, deterministic=deterministic, random_state=random_state) def handle(val, allow_dict): if val == "keep": return iap.Deterministic("keep") elif ia.is_single_integer(val): ia.do_assert(val > 0) return iap.Deterministic(val) elif ia.is_single_float(val): ia.do_assert(val > 0) return iap.Deterministic(val) elif allow_dict and isinstance(val, dict): if len(val.keys()) == 0: return iap.Deterministic("keep") elif any([key in ["height", "width"] for key in val.keys()]): ia.do_assert(all([key in ["height", "width"] for key in val.keys()])) if "height" in val and "width" in val: ia.do_assert(val["height"] != "keep-aspect-ratio" or val["width"] != "keep-aspect-ratio") size_tuple = [] for k in ["height", "width"]: if k in val: if val[k] == "keep-aspect-ratio" or val[k] == "keep": entry = iap.Deterministic(val[k]) else: entry = handle(val[k], False) else: entry = iap.Deterministic("keep") size_tuple.append(entry) return tuple(size_tuple) elif any([key in ["shorter-side", "longer-side"] for key in val.keys()]): ia.do_assert(all([key in ["shorter-side", "longer-side"] for key in val.keys()])) if "shorter-side" in val and "longer-side" in val: ia.do_assert(val["shorter-side"] != "keep-aspect-ratio" or 
val["longer-side"] != "keep-aspect-ratio") size_tuple = [] for k in ["shorter-side", "longer-side"]: if k in val: if val[k] == "keep-aspect-ratio" or val[k] == "keep": entry = iap.Deterministic(val[k]) else: entry = handle(val[k], False) else: entry = iap.Deterministic("keep") size_tuple.append(entry) return tuple(size_tuple) elif isinstance(val, tuple): ia.do_assert(len(val) == 2) ia.do_assert(val[0] > 0 and val[1] > 0) if ia.is_single_float(val[0]) or ia.is_single_float(val[1]): return iap.Uniform(val[0], val[1]) else: return iap.DiscreteUniform(val[0], val[1]) elif isinstance(val, list): if len(val) == 0: return iap.Deterministic("keep") else: all_int = all([ia.is_single_integer(v) for v in val]) all_float = all([ia.is_single_float(v) for v in val]) ia.do_assert(all_int or all_float) ia.do_assert(all([v > 0 for v in val])) return iap.Choice(val) elif isinstance(val, iap.StochasticParameter): return val raise Exception( "Expected number, tuple of two numbers, list of numbers, dictionary of " "form {'height': number/tuple/list/'keep-aspect-ratio'/'keep', " "'width': <analogous>}, dictionary of form {'shorter-side': number/tuple/list" "/'keep-aspect-ratio'/'keep', 'longer-side': <analogous>}," " or StochasticParameter, got %s." % (type(val),) ) self.size = handle(size, True) self.size_order = 'SL' if (isinstance(size, dict) and 'shorter-side' in size) else 'HW' if interpolation == ia.ALL: self.interpolation = iap.Choice(["nearest", "linear", "area", "cubic"]) elif ia.is_single_integer(interpolation): self.interpolation = iap.Deterministic(interpolation) elif ia.is_string(interpolation): self.interpolation = iap.Deterministic(interpolation) elif ia.is_iterable(interpolation): self.interpolation = iap.Choice(interpolation) elif isinstance(interpolation, iap.StochasticParameter): self.interpolation = interpolation else: raise Exception("Expected int or string or iterable or StochasticParameter, got %s." 
% ( type(interpolation),)) def _augment_images(self, images, random_state, parents, hooks): result = [] nb_images = len(images) samples_a, samples_b, samples_ip = self._draw_samples(nb_images, random_state, do_sample_ip=True) for i in sm.xrange(nb_images): image = images[i] sample_a, sample_b, sample_ip = samples_a[i], samples_b[i], samples_ip[i] h, w = self._compute_height_width(image.shape, sample_a, sample_b, self.size_order) image_rs = ia.imresize_single_image(image, (h, w), interpolation=sample_ip) result.append(image_rs) if not isinstance(images, list): all_same_size = (len(set([image.shape for image in result])) == 1) if all_same_size: result = np.array(result, dtype=np.uint8) return result def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): result = [] nb_heatmaps = len(heatmaps) samples_a, samples_b, samples_ip = self._draw_samples(nb_heatmaps, random_state, do_sample_ip=True) for i in sm.xrange(nb_heatmaps): heatmaps_i = heatmaps[i] sample_a, sample_b, sample_ip = samples_a[i], samples_b[i], samples_ip[i] h_img, w_img = self._compute_height_width(heatmaps_i.shape, sample_a, sample_b, self.size_order) h = int(np.round(h_img * (heatmaps_i.arr_0to1.shape[0] / heatmaps_i.shape[0]))) w = int(np.round(w_img * (heatmaps_i.arr_0to1.shape[1] / heatmaps_i.shape[1]))) h = max(h, 1) w = max(w, 1) # TODO change this to always have cubic or automatic interpolation? 
heatmaps_i_resized = heatmaps_i.resize((h, w), interpolation=sample_ip) heatmaps_i_resized.shape = (h_img, w_img) + heatmaps_i.shape[2:] result.append(heatmaps_i_resized) return result def _augment_segmentation_maps(self, segmaps, random_state, parents, hooks): result = [] nb_segmaps = len(segmaps) samples_h, samples_w, _ = self._draw_samples(nb_segmaps, random_state, do_sample_ip=False) for i in sm.xrange(nb_segmaps): segmaps_i = segmaps[i] sample_h, sample_w = samples_h[i], samples_w[i] h_img, w_img = self._compute_height_width(segmaps_i.shape, sample_h, sample_w, self.size_order) h = int(np.round(h_img * (segmaps_i.arr.shape[0] / segmaps_i.shape[0]))) w = int(np.round(w_img * (segmaps_i.arr.shape[1] / segmaps_i.shape[1]))) h = max(h, 1) w = max(w, 1) heatmaps_i_resized = segmaps_i.resize((h, w)) heatmaps_i_resized.shape = (h_img, w_img) + segmaps_i.shape[2:] result.append(heatmaps_i_resized) return result def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): result = [] nb_images = len(keypoints_on_images) samples_a, samples_b, _samples_ip = self._draw_samples(nb_images, random_state, do_sample_ip=False) for i in sm.xrange(nb_images): keypoints_on_image = keypoints_on_images[i] sample_a, sample_b = samples_a[i], samples_b[i] h, w = self._compute_height_width(keypoints_on_image.shape, sample_a, sample_b, self.size_order) new_shape = (h, w) + keypoints_on_image.shape[2:] keypoints_on_image_rs = keypoints_on_image.on(new_shape) result.append(keypoints_on_image_rs) return result def _augment_polygons(self, polygons_on_images, random_state, parents, hooks): return self._augment_polygons_as_keypoints( polygons_on_images, random_state, parents, hooks) def _draw_samples(self, nb_images, random_state, do_sample_ip=True): # TODO use SEED_MAX seed = random_state.randint(0, 10**6, 1)[0] if isinstance(self.size, tuple): samples_h = self.size[0].draw_samples(nb_images, random_state=ia.new_random_state(seed + 0)) samples_w = 
self.size[1].draw_samples(nb_images, random_state=ia.new_random_state(seed + 1)) else: samples_h = self.size.draw_samples(nb_images, random_state=ia.new_random_state(seed + 0)) samples_w = samples_h if do_sample_ip: samples_ip = self.interpolation.draw_samples(nb_images, random_state=ia.new_random_state(seed + 2)) else: samples_ip = None return samples_h, samples_w, samples_ip @classmethod def _compute_height_width(cls, image_shape, sample_a, sample_b, size_order): imh, imw = image_shape[0:2] if size_order == 'SL': # size order: short, long if imh < imw: h, w = sample_a, sample_b else: w, h = sample_a, sample_b else: # size order: height, width h, w = sample_a, sample_b if ia.is_single_float(h): ia.do_assert(0 < h) h = int(np.round(imh * h)) h = h if h > 0 else 1 elif h == "keep": h = imh if ia.is_single_float(w): ia.do_assert(0 < w) w = int(np.round(imw * w)) w = w if w > 0 else 1 elif w == "keep": w = imw # at least the checks for keep-aspect-ratio must come after # the float checks, as they are dependent on the results # this is also why these are not written as elifs if h == "keep-aspect-ratio": h_per_w_orig = imh / imw h = int(np.round(w * h_per_w_orig)) if w == "keep-aspect-ratio": w_per_h_orig = imw / imh w = int(np.round(h * w_per_h_orig)) return h, w def get_parameters(self): return [self.size, self.interpolation, self.size_order] class CropAndPad(meta.Augmenter): """ Crop/pad images by pixel amounts or fractions of image sizes. Cropping removes pixels at the sides (i.e. extracts a subimage from a given full image). Padding adds pixels to the sides (e.g. black pixels). .. note :: This augmenter automatically resizes images back to their original size after it has augmented them. To deactivate this, add the parameter ``keep_size=False``. 
dtype support:: if (keep_size=False):: * ``uint8``: yes; fully tested * ``uint16``: yes; tested * ``uint32``: yes; tested * ``uint64``: yes; tested * ``int8``: yes; tested * ``int16``: yes; tested * ``int32``: yes; tested * ``int64``: yes; tested * ``float16``: yes; tested * ``float32``: yes; tested * ``float64``: yes; tested * ``float128``: yes; tested * ``bool``: yes; tested if (keep_size=True):: minimum of ( ``imgaug.augmenters.size.CropAndPad(keep_size=False)``, :func:`imgaug.imgaug.imresize_many_images` ) Parameters ---------- px : None or int or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to crop (negative values) or pad (positive values) on each side of the image. Either this or the parameter `percent` may be set, not both at the same time. * If None, then pixel-based cropping/padding will not be used. * If int, then that exact number of pixels will always be cropped/padded. * If StochasticParameter, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left), unless `sample_independently` is set to False, as then only one value will be sampled per image and used for all sides. * If a tuple of two ints with values ``a`` and ``b``, then each side will be cropped/padded by a random amount in the range ``a <= x <= b``. ``x`` is sampled per image side. If however `sample_independently` is set to False, only one value will be sampled per image and used for all sides. * If a tuple of four entries, then the entries represent top, right, bottom, left. Each entry may be a single integer (always crop/pad by exactly that value), a tuple of two ints
<reponame>drsaunders/RapDetector<gh_stars>0 # coding: utf-8 # In[7]: get_ipython().magic(u'matplotlib inline') get_ipython().magic(u"config InlineBackend.figure_formats = {'svg',}") import numpy as np import pandas as pd from IPython.display import display from sklearn.metrics import confusion_matrix from sklearn import metrics from sklearn import cross_validation import matplotlib.pyplot as plt from sklearn import preprocessing import seaborn as sns from sklearn.cross_validation import StratifiedKFold # # Cross-Language Rap Detector # A previous project of mine, RapItalia, was designed to approximately track the growth in popularity of rap in Italy, based on the publication date of rap songs that turned up in an Italian lyrics database. Did rap suddenly get popular there 10 years ago? I didn't have any genre information, only lyrical content, and so I made the assertion that rap songs could be distinguished by the number of words in the lyrics. I used a quite arbitrary cutoff for the number of words, specifically 500 words, and verified it only with a quick look at the songs that were identified as rap (I did made sure not to tune it based on the final output of my analysis). The results of this classification were then fed into my subsequent analysis of publication date by genre # # Making an assumption like that without evidence is pretty sloppy, so I set out to do better. I did two things: # 1. Linking a large body of lyrics with high quality genre information # 2. Evaluated whether songs can be correctly classified as rap or non-rap based on non-language-specific properties of the lyrics (like the number of words) # # Why non-language-specific? If I train on surface features of rap songs in every language, then I can identify rap songs in every language. This could also shed light generally on the surface-level lyrical differences between genres. 
# # First, some helper functions: # In[42]: def feature_distribution(data, which_feature, xlim=None): """Plot a comparison of the distribution of a particular feature between rap and non-rap.""" # Compute the range of feature values to use, if not specified in xlim med = np.median(train_data.loc[:,which_feature]) q75, q25 = np.percentile(train_data.loc[:,which_feature], [75 ,25]) iqr = q75 - q25 minx = med-(iqr*2.5) if minx < 0: minx = 0 maxx = med+(iqr*2.5) if xlim: minx=xlim[0] maxx=xlim[1] nbins = 20 bins = np.linspace(minx, maxx, nbins+1) # Plot the histograms plt.figure() sns.distplot(data.loc[data.is_rap==False,which_feature], bins=bins, label='Non-rap') sns.distplot(data.loc[data.is_rap==True,which_feature], bins=bins, label='Rap') plt.xlim(minx, maxx) plt.title(which_feature) plt.legend() def plot_feature_importance(features, fitted_forest): """Using a fitted random forest, make a cleveland dot plot of the computed feature importances. """ plt.figure() vals = fitted_forest.feature_importances_ sortorder = np.flipud(np.argsort(vals)) features = np.array(features) with sns.axes_style("whitegrid"): sns.stripplot(y=features[sortorder], x=vals[sortorder], orient="h", color='red', size=10) xl = plt.xlim() plt.xlim(0,xl[1]) plt.grid(axis='y',linestyle=':') plt.xlabel('Feature importance score') def examine_prediction(y, prediction, data, features, show_misidentified=True): """Given a prediction and ground truth (y), output statistics about the quality of the prediction.""" if type(features) == np.ndarray: features = features.tolist() cm = confusion_matrix(y, prediction) np.set_printoptions(precision=2) nonrap_misidentified = float(cm[0,1])/(cm[0,0]+cm[0,1]) print "Accuracy =\t%.1f%%" % (100*metrics.accuracy_score(y, prediction)) print "Rap songs correctly identified =\t%.1f%%" % (100*metrics.recall_score(y, prediction)) print "Songs incorrectly identified as rap =\t%.1f%%" % (100*(1-metrics.precision_score(y, prediction))) print "Non-rap songs identified as rap 
=\t%.1f%%" % (100*nonrap_misidentified) print "F1 score =\t%.3f" % metrics.f1_score(y, prediction) print('Confusion matrix') print(cm) if show_misidentified: print "Misidentified as rap: " display(data.loc[(prediction==1) & (y==0),['artist_name','title']+features]) print "Misidentified as nonrap: " display(data.loc[(prediction==0) & (y==1),['artist_name','title']+features]) def compute_features(lyrics, tdm_indices): """Create new superficial lyrics features. Return df with the new features in columns and one row per track.""" import time start = time.time() total_num_words = np.zeros(len(tdm_indices)) tdm = lyrics['tdm'].toarray() for i in range(len(tdm_indices)): total_num_words[i] = tdm[tdm_indices[i],:].sum() # print (time.time()-start)/60 word_lens = np.array([len(i) for i in lyrics['unstemmed_terms']],dtype=float) mean_word_length = np.zeros(len(tdm_indices)) for i in range(len(tdm_indices)): word_indices = tdm[tdm_indices[i],:].nonzero()[0] mean_word_length[i] = np.mean(word_lens[word_indices]) # print (time.time()-start)/60 median_word_rank = np.zeros(len(tdm_indices)) for i in range(len(tdm_indices)): word_indices = tdm[tdm_indices[i],:].nonzero()[0] median_word_rank[i] = np.median(word_indices) # print (time.time()-start)/60 mean_word_instances = np.zeros(len(tdm_indices)) for i in range(len(tdm_indices)): nums = tdm[tdm_indices[i],:] nz = nums[nums.nonzero()] mean_word_instances[i] = np.mean(nz) mean_word_instances = np.divide(mean_word_instances, total_num_words) # print (time.time()-start)/60 additional_features = pd.DataFrame(data={'total_num_words':total_num_words, 'mean_word_length':mean_word_length, 'median_word_rank':median_word_rank, 'mean_word_instances':mean_word_instances}) return additional_features # ## Creating the dataset # My source for lyrics was the [musicXmatch Dataset](http://labrosa.ee.columbia.edu/millionsong/musixmatch), which contains entries for 237,662 songs from the Million Songs Dataset. 
The MSD is a selection of one million songs based on [loose criteria](http://labrosa.ee.columbia.edu/millionsong/pages/how-did-you-choose-million-tracks) that included as many songs as possible by popular artists, and "extreme" songs in terms of audio characteristics. However the complete lyrics are not included, for copyright reasons: # # > The lyrics come in bag-of-words format: each track is described as the word-counts for a dictionary of the top 5,000 words across the set. # # This eliminates at least two surface-level properties I was interested in, the line lengths and the occurrence of extremely rare (or made-up) words. But it retains many more. I stored lyrics information in a dict called lyrics, which has at the heart of it a sparse matrix of counts of words (columns) by tracks (rows), sorted in decreasing order of word frequency across the corpus. # ``` # print lyrics['terms'][0:10] # print(lyrics['tdm'][:5,:].toarray()) # # ['i', 'the', 'you', 'to', 'and', 'a', 'me', 'it', 'not', 'in'] # [[10 0 17 ..., 0 0 0] # [28 15 2 ..., 0 0 0] # [ 5 4 3 ..., 0 0 0] # [16 4 0 ..., 0 0 0] # [39 30 10 ..., 0 0 0]]``` # # Although the Million Songs Dataset contains a large amount of metadata and data about the acoustic properties of songs (based on data compiled by [The Echo Nest](https://en.wikipedia.org/wiki/The_Echo_Nest), it does not have genre information. I got that from the [tagtraum genre annotations](www.tagtraum.com/msd_genre_datasets.html) to the Million Songs Dataset. It determines genre based on human-generated annotations from the All Music Guide, Last.fm, and the beaTunes Genre Dataset (BGD). There are up to two genres listed for every song, and I defined a track as being rap if it had "Rap" in either of the two genre slots. 
# # The tagtraum genre annotations covered 133,676 tracks, of which 55,726 intersected with the tracks in the musicXmatch lyrics training set, and 6,967 with the lyrics test set (the musicXmatch dataset has a standard train-test split). `generate_track_info.py` does this merge, and also adds track names and artist names by querying the MSD's sqlite3 database track_metadata.db, and saves the result as pickles. # In[15]: import pickle with open('train_track_info.pickle','r') as f: track_info = pickle.load(f) with open('train_lyrics_data.pickle','r') as f: lyrics = pickle.load(f) # ## Feature engineering: Surface text features # Compute new features for each track based on the lyrics. # In[44]: # Create features new_features = compute_features(lyrics, track_info.tdm_row) train_data = pd.concat([track_info, new_features],axis=1) features = new_features.columns.values # Examining the distribution of these variables between the two classes shows promising separation of tracks. # # `total_num_words` is the number of words in the track, which will be an underestimate of the true number of words because of all words beyond the 5000 most frequent in the lyrics dataset being eliminated. Nevertheless, it should have a very strong linear correlation with the true number of words. # In[51]: feature_distribution(train_data,'total_num_words',[0,1000]) # `mean_word_length` is the mean of the word lengths in a track, not weighting by frequency of the word. Again, not precisely the real values, since the lyrics have been stemmed (although I used the provided unstemming dictionary) but should correlate strongly. # In[52]: feature_distribution(train_data,'mean_word_length') # `median_word_rank` is the median of the horizontal index of the words in the term-document matrix, which reflects the rarity of the words used. 
# In[53]: feature_distribution(train_data,'median_word_rank',[0,500]) # `mean_word_instances` is the mean number of times a word is repeated in a track, divided by the total number of words in the track. It should reflect how repetitive the song is lyrically (e.g. because of a high ratio of choruses to verses) # In[54]: feature_distribution(train_data,'mean_word_instances') # ## How I Got to 95% Accuracy Without Really Trying: The Problem of Imbalanced Datasets # All my initial attempts to correctly detect rap songs using the features I created seemed to be very successful: 95% accuracy. But then I realized that this was due to rap songs being much less common than non-rap. # In[55]: pd.value_counts(track_info.is_rap) # In fact a dumb model that predicts that no songs will ever be rap achieves this accuracy, thanks to the imbalanced dataset. # In[56]: # Baseline
<filename>Author_Template/TexScanner.py #!/usr/bin/env python # T e x S c a n n e r . p y # # Defines a TexScanner class that can be used to extract LaTeX directives # from a .tex file. Call SetFile() to supply a reference to an already # open .tex file, then use successive calls to GetNextTexCommand() to get # all the tex directives and their parameters from the file. This probably # isn't of very general utility when it comes to parsing .tex files, but # it is useful for the ADASS editing purposes for which it was written, # where all that was wanted was to find graphics or citation commands and # see what files or references they were using. # # Parsing LaTeX files is tricky, and this code isn't perfect by any means. # There are lots of constructs that will fool it, usually into missing # commands that it ought to spot. If it manages to spot any problems, this # can be checked using a call to ParsedOK() and details of any problem can # be obtained by calling GetReport(). In testing, I have seen this parser # complain about unclosed braces that turn out to be due to an unescaped # comment character that LaTeX has not complained about. (Parsing problems # often seem to be associated with the use of '{','[' and '%' in math # expressions, and it may be that the parser needs to know about math mode.) # This code would benefit from a proper review and possible reworking some day. # # History: # 14th Jan 2016. Original version, KS. # 28th Jan 2016. GetNextWord() now allows for nesting. GetNextTexCommand() # now allows for any number of required and/or optional # arguments. The list it returns can be of any length, not # always one of three items, so calling code will need to # be modified. KS. # 1st Feb 2016. Interface to GetNextTexCommand() reworked to use a # callback for each new command found. 
This should make it # easier to introduce a recursive scan that catches commands # included within the arguments to other commands, although # at the moment this is not implemented. KS. # 11th Feb 2016. GetTexCommand() now does do a recursive scan through the # arguments of the commands it finds. KS. # 16th Feb 2016. Now catches multiple LaTeX directives in one argument, eg # \citetext{\citealp{l1980}, implemented in \citealp{w12}} # 30th Mar 2016. Now checks to see if '%' characters are comment characters # or just literal '%' that have been escaped. KS. # 7th Apr 2016. Fixed obscure parsing bug triggered by the sequence # "$\mu$m" which caused the scanning of the string containing # it to be terminated prematurely. It's because the code # had assumed that all \directives would be terminated by # a line break, space, or a '{' or '[', which is not of # course the case. Strange it took this long to show up. KS. # 12th Apr 2016. GetNextChar() now intercepts "\n" characters and treats # them as spaces - this is essentially what LaTeX does. KS. # 2nd May 2016. Fixed a parsing problem where a slightly unusual sequence # (involving a \newcommand definition on a single line} sent # the parser into infinite recursion. KS. # 24th Jul 2017. Fixed a problem seen in a .tex file that had an equation # that involved an escaped brace '\{' character. This was # being treated as a delimiter, with unfortunate results. # WasEscaped now records if the last character, as returned # by GetNextChar(), was escaped, and GetNextWord() uses this # to ignore escaped braces and parentheses. KS. # 25th Jul 2017. Added a check for a parser runaway, and the routines # ParsedOK() and GetReport(). GetNextWordFromString{} now # has the same tests for escaped braces as used by # GetNextWord(). KS. # 15th Aug 2017. Converted to run under Python3, using 2to3. Added # the importing of items from __future__ to allow this to # run under either Python2 or Python3. 
#                 (In actuality, this
#                 code worked unchanged under Python3, and since it doesn't
#                 use print, doesn't really need that future import, but it
#                 seems to be good practice anyway.) KS.
#

from __future__ import (print_function,division,absolute_import)

import os
import sys
import string

class TexScanner(object):
    # Stateful scanner over an already-open .tex file.
    # Instance attributes:
    #   FileIdSet  - True once SetFile() has supplied a file object
    #   Escaped    - True if the character just read was a backslash
    #                (so the NEXT character is escaped)
    #   WasEscaped - True if the character last returned by GetNextChar()
    #                was itself escaped
    #   LastChar   - one-character pushback buffer used by GetNextWord()
    #   LastWord   - (unused in this fragment)
    #   Line       - count of newline characters consumed so far
    #   Problems   - accumulated parse-problem description strings

    def __init__(self):
        self.FileIdSet = False
        self.Escaped = False
        self.WasEscaped = False
        self.LastChar = ""
        self.LastWord = ""
        self.Line = 0
        self.Problems = []

    def SetFile(self,FileId) :
        # Needs to be called before any of the Get... routines. This passes
        # the Id of an open .tex file to the scanner.
        # Note: this resets the line counter and problem list, but not the
        # Escaped/LastChar state left over from a previous file.
        self.FileId = FileId
        self.FileIdSet = True
        self.Line = 0
        self.Problems = []

    def ParsedOK(self) :
        # Returns True if the .tex file parsed without problems. If it returns
        # False, GetReport() can be called to get a description of what happened.
        return (len(self.Problems) == 0)

    def GetReport(self) :
        # If the file parsed with problems, this returns a list of strings that
        # describe what happened. If the file parsed OK, this returns an empty
        # list.
        return self.Problems

    def GetNextChar(self) :
        # Returns the next character from a .tex file. If a comment character
        # ('%') is encountered, this skips to the end of the current line and
        # returns the newline character at the end. If the end of the file is
        # reached, or if the file is not open, this returns an empty string.
        # Allow for the case where the comment character was escaped, in which
        # case treat it as a literal '%'. LaTeX treats an end of line like a
        # space, and we intercept "\n" characters and turn them into spaces to
        # get the same effect.
        Char = ""
        if (self.FileIdSet) :
            Char = self.FileId.read(1)
            if (Char == "%") :
                # '%' starts a comment only when it was not escaped ('\%').
                if (not self.Escaped) :
                    # Skip the rest of the commented line (stop at newline or EOF).
                    while (True) :
                        Char = self.FileId.read(1)
                        if (Char == "\n" or Char == "") : break
            # Record escape state: WasEscaped describes the character we are
            # about to return; Escaped primes the state for the NEXT read.
            # NOTE(review): a literal double backslash "\\" sets Escaped on the
            # second backslash as well — confirm whether that matters for the
            # directives this scanner is used on.
            self.WasEscaped = self.Escaped
            self.Escaped = (Char == "\\")
            if (Char == "\n") :
                # Newlines count as line breaks for Line, but are returned
                # as spaces, mirroring LaTeX's treatment of ends of lines.
                Char = " "
                self.Line = self.Line + 1
        return Char

    def GetNextLine(self) :
        # Returns the next line from a .tex file, with comments stripped out.
        # This means anything in a line from the first '%' character up to but
        # not including the final newline character is removed from the line.
        # It does mean than a line that starts with a '%' is returned as a
        # blank line - just a newline; it is not ignored completely. If the
        # end of the file is reached, or the file is not open, this returns
        # an empty string. (Note that this routine isn't used any more by the
        # other routines in this file, although it was originally.)
        # NOTE(review): GetNextChar() converts "\n" into " ", so the "\n"
        # test below can no longer fire; the loop now only terminates at EOF.
        # This is consistent with the routine being unused.
        Result = ""
        while (True) :
            Char = self.GetNextChar()
            Result = Result + Char
            if (Char == "\n" or Char == "") : break
        return Result

    def GetNextWord(self):
        # Returns the next 'word' from a .tex file. Comments are ignored, and
        # a 'word' is defined slightly unusually here in order to help with
        # processing LaTeX directives. Anything enclosed in {} or in []
        # braces or brackets, including the enclosing {} or [] is considered
        # a word. Blanks and { and [ characters delimit words, as do the
        # ends of lines, which are assumed to be one or more of \n and \r
        # characters. Ends of lines are removed when encountered within
        # {} or [] characters.
        Word = ""
        # Find the first non-blank character (treating newline characters
        # and carriage returns as blanks). LastChar is a one-character
        # pushback buffer: consume it first if a previous call left one.
        while (True) :
            if (self.LastChar != "") :
                Char = self.LastChar
            else :
                Char = self.GetNextChar()
            self.LastChar = ""
            if (Char != " " and Char != "\n" and Char != "\r") : break
        if (Char != "") : Word = Word + Char
        # If the word started with a { or [, then we ignore
<gh_stars>1-10
"""
Remote ranorex library for robot framework
All commands return True if they are executed correctly
"""
# IronPython imports: this module runs under IronPython so it can load the
# .NET Ranorex and WinForms assemblies via clr.
import clr
clr.AddReference('Ranorex.Core')
clr.AddReference('System.Windows.Forms')
import System.Windows.Forms
import Ranorex
# Python imports
from argparse import ArgumentParser
from robotremoteserver import RobotRemoteServer
import subprocess
import logging
import time
import sys
import os
import xml.etree.ElementTree as ET
import difflib

log = logging.getLogger("RXCONNECTOR")


class RanorexLibrary(object):
    """ Basic implementation of ranorex object calls for robot framework """

    def __init__(self):
        # debug: when True, keywords emit extra log.debug output.
        # model_loaded / model: placeholders, not used in this fragment.
        self.debug = False
        self.model_loaded = False
        self.model = None

    @classmethod
    def __return_type(cls, locator):
        """ Function serves as translator from xpath into .net object
        that is recognized by ranorex. Returns supported object type.
        """
        # Side effects: globally disable Ranorex reporting and force
        # EnsureVisible on every adapter created afterwards.
        Ranorex.Validate.EnableReport = False
        Ranorex.Adapter.DefaultUseEnsureVisible = True
        supported_types = ['AbbrTag', 'AcronymTag', 'AddressTag', 'AreaTag',
                           'ArticleTag', 'AsideTag', 'ATag', 'AudioTag',
                           'BaseFontTag', 'BaseTag', 'BdoTag', 'BigTag',
                           'BodyTag', 'BrTag', 'BTag', 'Button', 'ButtonTag',
                           'CanvasTag', 'Cell', 'CenterTag', 'CheckBox',
                           'CiteTag', 'CodeTag', 'ColGroupTag', 'ColTag',
                           'Column', 'ComboBox', 'CommandTag', 'Container',
                           'ContextMenu', 'DataListTag', 'DdTag', 'DelTag',
                           'DetailsTag', 'DfnTag', 'DirTag', 'DivTag',
                           'DlTag', 'EmbedTag', 'EmTag', 'FieldSetTag',
                           'FigureTag', 'FontTag', 'Form', 'FormTag', 'Link',
                           'List', 'ListItem', 'MenuBar', 'MenuItem',
                           'Picture', 'ProgressBar', 'RadioButton', 'Row',
                           'ScrollBar', 'Slider', 'StatusBar', 'Table',
                           'Text', 'TitleBar', 'ToggleButton', 'Tree',
                           'TreeItem', 'Unknown']
        # The element type is the last path step of the RanoreXPath,
        # with any predicate ("[...]") stripped off.
        splitted_locator = locator.split('/')
        if "[" in splitted_locator[-1]:
            ele = splitted_locator[-1].split('[')[0]
        else:
            ele = splitted_locator[-1]
        # Case-insensitive match against the supported adapter names.
        for item in supported_types:
            if ele.lower() == item.lower():
                return item
            elif ele.lower() == '':
                raise AssertionError("No element entered")
        raise AssertionError("Element is not supported. Entered element: %s" % ele)

    def __create_element(self, locator):
        # Instantiate the Ranorex adapter for `locator`, retrying up to 3 times.
        # NOTE(review): the bare `except` swallows every error; and if
        # __return_type() raises on the first attempt, `element_type` is
        # unbound when the log.debug / AssertionError below reference it —
        # confirm before relying on the failure path.
        tries = 0
        element_created = False
        while not element_created and tries < 3:
            try:
                tries += 1
                element_type = self.__return_type(locator)
                # getattr picks the adapter class (e.g. Ranorex.Button) by name.
                element = getattr(Ranorex, element_type)(locator)
                element_created = True
                if self.debug:
                    log.debug("Element at %s", locator)
                    log.debug("Application object is %s", element)
            except:
                log.debug("Element %s not found, trying %s. time",
                          element_type, tries)
        if not element_created:
            raise AssertionError("Element {} not found after {} tries".format(
                                 element_type, tries))
        return element

    def start_debug(self):
        """ Starts to show debug messages on remote connector """
        self.debug = True

    def stop_debug(self):
        """ Stops to show debug messages """
        self.debug = False

    def click_element(self, locator, location=None):
        """ Clicks on element identified by locator and location

        :param locator: xpath selector of element
        :param location: relative coordinates of mouse click from top left
            corner of element, i.e. "x,y"
        :returns: True / False
        """
        if self.debug:
            log.debug("Click Element")
            log.debug("Location: %s", location)
        element = self.__create_element(locator)
        try:
            if location == None:
                element.Click()
                return True
            else:
                # basestring: this library targets Python 2 / IronPython.
                if not isinstance(location, basestring):
                    raise AssertionError("Location must be a string")
                location = [int(x) for x in location.split(',')]
                element.Click(Ranorex.Location(location[0], location[1]))
                return True
        except Exception as error:
            if self.debug:
                log.error("Failed because of %s", error)
            raise AssertionError(error)

    def check(self, locator):
        """ Check if element is checked. If not it check it.
        Only checkbox and radiobutton are supported.
        Uses Click() method to check it.

        :param locator: xpath selector of element
        :returns: True/False
        """
        # Returns True only when it actually clicked; implicitly returns
        # None when the element was already checked.
        if self.debug:
            log.debug("Check")
        element = self.__create_element(locator)
        if not element.Element.GetAttributeValue('Checked'):
            element.Click()
            return True

    def check_event_viewer(self, *args):
        """ POWERSHELL is required for this functionality!!!
        Windows XP powershell: http://tinyurl.com/om8swcs
        Gets result from event_viewer according to *args

        :param args: arguments according to: http://tinyurl.com/lzh7wed
        :returns: Full output of powershell cmdlet
        """
        if self.debug:
            log.debug("Arguments event viever: %s", args)
        # shell=True is needed here to invoke powershell with a composed
        # argument string; args come from the test suite, not end users.
        cmd = r'powershell Get-EventLog %s' % ' '.join(args)
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        out = p.communicate()[0]
        return out

    @classmethod
    def check_if_process_is_running(cls, process_name):
        """ Check if process with desired name is running.
        Returns name of process if running

        :param process_name: xpath selector of element
        :returns: True/False
        """
        # Greps the Windows `tasklist` output for the process name.
        proc = subprocess.Popen(['tasklist'], stdout=subprocess.PIPE)
        out = proc.communicate()[0]
        return out.find(process_name) != -1 if out else False

    def clear_text(self, locator):
        """ Clears text from text box. Only element Text is supported.

        :param locator: xpath selector of element
        :returns: True/False
        """
        if self.debug:
            log.debug("Clear Text")
        element = self.__create_element(locator)
        # Select everything from cursor to start of field, then delete.
        element.PressKeys("{End}{Shift down}{Home}{Shift up}{Delete}")
        return True

    def double_click_element(self, locator, location=None):
        """ Doubleclick on element identified by locator.
        It can click on desired location if requested.

        :param locator: xpath selector of element
        :param location: relative coordinates of mouse click from top left
            corner of element, i.e. "x,y"
        :returns: True/False
        """
        if self.debug:
            log.debug("Double Click Element")
            log.debug("Location: %s", location)
        element = self.__create_element(locator)
        try:
            if location == None:
                element.DoubleClick()
                return True
            else:
                if not isinstance(location, basestring):
                    raise AssertionError("Location must be a string")
                location = [int(x) for x in location.split(',')]
                element.DoubleClick(Ranorex.Location(location[0], location[1]))
                return True
        except Exception as error:
            raise AssertionError(error)

    def get_table(self, locator):
        """ Get content of table without headers

        :param locator: xpath string selecting element on screen
        :returns: two dimensional array with content of the table
        """
        element = self.__create_element(locator)
        # Row-major list of lists of cell texts.
        table = [[cell.Text for cell in row.Cells] for row in element.Rows]
        return table

    def get_element_attribute(self, locator, attribute):
        """ Get specified element attribute.

        :param locator: xpath selector of element
        :returns: True/False
        """
        if self.debug:
            log.debug("Get Element Attribute %s", attribute)
        element = self.__create_element(locator)
        _attribute = element.Element.GetAttributeValue(attribute)
        if self.debug:
            log.debug("Found attribute value is: %s", _attribute)
        return _attribute

    def get_xml_attribute(self, xml_path, xpath, attrib):
        """ retrieves xml attribute using xpath """
        if self.debug:
            log.debug("Retrieving %s attribute %s" % (xml_path, attrib))
            log.debug("Xpath of element: %s" % xpath)
        with open(xml_path, 'r') as f:
            # Strip NUL bytes before parsing (some producers pad the file).
            x = f.read().replace('\x00','')
            tree = ET.fromstring(x)
            x = tree.find(xpath)
            # NOTE(review): f.close() is redundant inside the `with` block.
            f.close()
        return x.attrib[attrib]

    def input_text(self, locator, text):
        """ input texts into specified locator.

        :param locator: xpath selector of element
        :param text: text value to input into element
        :returns: True/False
        """
        if self.debug:
            log.debug("Input Text: %s", text)
        element = self.__create_element(locator)
        element.PressKeys(text)
        return True

    def make_diff(self, file_a, file_b):
        """ makes diff between two files

        :param file_a: first file to compare
        :param file_b: second file to compare
        :returns: output of diff (empty if no diff)
        """
        if self.debug:
            log.debug("First file: %s, Second file: %s", file_a, file_b)
        # NOTE(review): the two file handles are never closed; consider
        # `with open(...)` when this module is next touched.
        ff = open(file_a, "r").readlines()
        sf = open(file_b, "r").readlines()
        output = ""
        for line in difflib.unified_diff(ff, sf):
            output += line
        return output

    def move_mouse_to(self, x, y):
        """ Move mouse to global coordinates

        :param x: int, position on x axis
        :param y: int, position on y axis
        :return: True on success
        """
        if self.debug:
            log.debug("Moving mouse to: %s,%s", x, y)
        mouse = Ranorex.Mouse()
        mouse.MoveTo(int(x), int(y))
        return True

    def right_click_element(self, locator, location=None):
        """ Rightclick on desired element identified by locator.
        Location of click can be used.

        :param locator: xpath selector of element
        :param location: relative coordinates of mouse click from top left
            corner of element, i.e. "x,y"
        :returns: True/False
        """
        if self.debug:
            log.debug("Right Click Element")
            log.debug("Location: %s", location)
        element = self.__create_element(locator)
        try:
            if location == None:
                element.Click(System.Windows.Forms.MouseButtons.Right)
                return True
            else:
                # NOTE(review): message says "Locator" where the sibling
                # click keywords say "Location" — left as-is (runtime string).
                if not isinstance(location, basestring):
                    raise AssertionError("Locator must be a string")
                location = [int(x) for x in location.split(',')]
                element.Click(System.Windows.Forms.MouseButtons.Right,
                              Ranorex.Location(location[0], location[1]))
                return True
        except Exception as error:
            raise AssertionError(error)

    def run_application(self, app):
        """ Runs local application.

        :param app: path to application to execute
        :returns: True/Ranorex exception
        """
        if self.debug:
            log.debug("Run Application %s", app)
            log.debug("Working dir: %s", os.getcwd())
        Ranorex.Host.Local.RunApplication(app)
        return True

    def run_application_with_parameters(self, app, params):
        """ Runs local application with parameters.

        :param app: path to application to execute
        :param params: parameters for application
        :returns: True/False
        """
        if self.debug:
            log.debug("Run Application %s With Parameters %s", app, params)
            log.debug("Working dir: %s", os.getcwd())
        Ranorex.Host.Local.RunApplication(app, params)
        return True

    def run_script(self, script_path):
        """ Runs script on remote machine and returns stdout and stderr.

        :param script_path: path to script to execute
        :returns: dictionary with "stdout" and "stderr" as keys
        """
        if self.debug:
            log.debug("Run Script %s", script_path)
            log.debug("Working dir: %s", os.getcwd())
        # Run with cwd set to the script's own directory.
        wd = os.path.dirname(script_path)
        process = subprocess.Popen([script_path], stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, cwd=wd)
        output = process.communicate()
        return {'stdout':output[0], 'stderr':output[1]}

    def run_script_with_parameters(self, script_path, *params):
        """ Runs script on remote machine and returns stdout and stderr.

        :param script_path: path to script to execute
        :param params: parameters for script
        :returns: dictionary with "stdout" and "stderr" as keys
        """
        params = list(params)
        wd = os.path.dirname(script_path)
        if self.debug:
            log.debug("Run Script %s with params %s", script_path, params)
            log.debug("Working dir: %s", os.getcwd())
        process = subprocess.Popen([script_path] + params,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, cwd=wd)
        output = process.communicate()
        return {'stdout':output[0], 'stderr':output[1]}

    def scroll(self, locator, amount):
        """ Hover above selected element and scroll positive or negative
        amount of wheel turns

        :param locator: xpath pointing to desired element
        :param amount: int - amount of scrolling
        :return: None
        """
        element = self.__create_element(locator)
        mouse = Ranorex.Mouse()
        mouse.MoveTo(element.Element)
        mouse.ScrollWheel(int(amount))

    def select_by_index(self, locator, index):
        """ Selects item from combobox according to index.

        :param locator: xpath
#COUNTS if personal_query == '': if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==True)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==True)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==True)).count()) else: if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": 
infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) #INSERT ROLL infoLevel.append(infoeLevelTemp) #PER USER elif str(request.vars['level'])=="4": if len(usersProject) == 0: session.flash = T('Report no visible: There are no parameters required to display the report.') redirect(URL('teacher_reports', 
'validate_laboratory_management',vars=dict(level='3', month = str(request.vars['month']), type_L = str(request.vars['type_U']), type_U = str(request.vars['type_U']), querySearch=personal_query))) for userPT in usersProject: userP=db(db.auth_user.username==userPT).select().first() if userP is None: userP=db(db.validate_laboratory_log.user_name==userPT).select().first() userP=userP.user_name else: userP=userP.username infoeLevelTemp = [] #ID OF USER infoeLevelTemp.append(userP) #NAME OF USER infoeLevelTemp.append(userP) #COUNTS if personal_query == '': if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==True)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==True)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": 
infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==True)).count()) else: if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": 
infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) #INSERT USER infoLevel.append(infoeLevelTemp) #DATA elif str(request.vars['level'])=="5": #COUNTS if personal_query == '': if str(request.vars['type_L'])=="all": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.validation_type==True)).select() elif str(request.vars['type_L'])=="i": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==True)).select() elif str(request.vars['type_L'])=="u": allData = 
db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==True)).select() elif str(request.vars['type_L'])=="d": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==True)).select() else: if str(request.vars['type_L'])=="all": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).select() elif str(request.vars['type_L'])=="i": allData = 
db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).select() elif str(request.vars['type_L'])=="u": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).select() elif str(request.vars['type_L'])=="d": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==True)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).select() grid = [] for data in allData: grid.append(data.id) if len(grid) == 0: grid.append(-1) #GRID db.validate_laboratory_log.id.readable = False db.validate_laboratory_log.id.writable = False db.validate_laboratory_log.user_name.readable = False 
db.validate_laboratory_log.user_name.writable = False db.validate_laboratory_log.roll.readable = False db.validate_laboratory_log.roll.writable = False db.validate_laboratory_log.academic_id.readable = False db.validate_laboratory_log.academic_id.writable = False db.validate_laboratory_log.project.readable = False db.validate_laboratory_log.project.writable = False db.validate_laboratory_log.yearp.readable = False db.validate_laboratory_log.yearp.writable = False db.validate_laboratory_log.period.readable = False db.validate_laboratory_log.period.writable = False db.validate_laboratory_log.validation_type.readable = False db.validate_laboratory_log.validation_type.writable = False db.validate_laboratory_log.id_validate_laboratory.readable = False db.validate_laboratory_log.id_validate_laboratory.writable = False grid = SQLFORM.grid(db.validate_laboratory_log.id.belongs(grid), csv=False, create=False, editable=False, deletable=False, paginate=9, searchable=False) return dict(personal_query=personal_query, infoLevel=infoLevel, period=period, project=project, month=month, roll=roll, userP=userP, grid=grid) #************************************************************************************************************************************* #************************************************************************************************************************************* #*****************************************************MANAGEMENT REPORT REPLACING***************************************************** @auth.requires_login() @auth.requires(auth.has_membership('Teacher')) def laboratory_replacing_management_export(): #**************************************************************************************************************** #**************************************************************************************************************** #***************************************CHECK IF THERE IS A PERSONALIZED QUERY*********************************** import cpfecys 
period = cpfecys.current_year_period() from datetime import datetime infoLevel = [] personal_query = '' if request.vars['querySearch'] is not None and str(request.vars['querySearch']) != "": #PERSONALIZED QUERY SURE WORK try: personal_query = int(request.vars['querySearch']) countI = db(db.validate_laboratory_log.academic==personal_query).count() except: personal_query = '' #**************************************************************************************************************** #**************************************************************************************************************** #******************************************VERIFY THAT ACCURATE PARAMETERS*************************************** try: #CHECK THAT THE LEVEL OF REPORT IS VALID if request.vars['level'] is not None and (int(request.vars['level'])<1 or int(request.vars['level'])>5): session.flash = T('Not valid Action.') redirect(URL('default','index')) #VERIFY THAT THE PARAMETERS OF EACH LEVEL BE VALID if request.vars['level'] is not None: #LEVEL MORE THAN 1 if int(request.vars['level'])>1: #CHECK IF THE TYPE OF REPORT IS VALID if request.vars['type_L'] is None or (str(request.vars['type_L'])!="all" and str(request.vars['type_L'])!="i" and str(request.vars['type_L'])!="u" and str(request.vars['type_L'])!="d"): session.flash = T('Not valid Action.') redirect(URL('default','index')) #CHECK IF THE PROJECT IS VALID project = VALIDATE_PROJECT(request.vars['project'],'validate_laboratory_log') if project is None: session.flash = T('Not valid Action.') redirect(URL('default','index')) #LEVEL MORE THAN 2 if int(request.vars['level'])>2: #CHECK IF THE TYPE OF REPORT IS VALID if request.vars['type_U'] is None or (str(request.vars['type_U'])!="all" and str(request.vars['type_U'])!="i" and str(request.vars['type_U'])!="u" and str(request.vars['type_U'])!="d"): session.flash = T('Not valid Action.') redirect(URL('default','index')) #CHECK IF THE MONTH IS VALID month = 
VALIDATE_MONTH(request.vars['month']) if month is None: session.flash = T('Not valid Action.') redirect(URL('default','index')) #OBTAIN ROLES roles=GET_ROLES('validate_laboratory_log') #LEVEL MORE THAN 4 if int(request.vars['level'])>3: #CHECK IF THE ROLE IS VALID roll = VALIDATE_ROLE(request.vars['roll'],'validate_laboratory_log') if roll is None: session.flash = T('Not valid Action.') redirect(URL('default','index')) #OBTAIN USERS usersProject = GET_USERS(project,roll,'validate_laboratory_log') #LEVEL MORE THAN 5 if int(request.vars['level'])>4: #CHECK IF THE USER IS VALID userP = VALIDATE_USER(project,roll,request.vars['userP'],'validate_laboratory_log') if userP is None: session.flash = T('Not valid Action.') redi*rect(URL('default','index')) except: session.flash = T('Not valid Action.') redirect(URL('default','index')) #**************************************************************************************************************** #**************************************************************************************************************** #*****************************************************REPORT***************************************************** #TITLE infoLevel = [] infoeLevelTemp=[] infoeLevelTemp.append('Universidad de San Carlos de Guatemala') infoLevel.append(infoeLevelTemp) infoeLevelTemp=[] infoeLevelTemp.append('Facultad de Ingeniería') infoLevel.append(infoeLevelTemp) infoeLevelTemp=[] infoeLevelTemp.append('Escuela de Ciencias y Sistemas') infoLevel.append(infoeLevelTemp) #TYPE OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Type')) infoeLevelTemp.append(T('Report Equivalence Management Laboratory')) infoLevel.append(infoeLevelTemp) #PERIOD OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Period')) infoeLevelTemp.append(T(period.period.name)+' '+str(period.yearp)) infoLevel.append(infoeLevelTemp) #ALL SEMESTERS if request.vars['level']=='1' or request.vars['level'] is None: #MIDDLE LINE OF REPORT infoeLevelTemp=[] 
infoLevel.append(infoeLevelTemp) #LABLE DETAIL OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Detail')) infoLevel.append(infoeLevelTemp) #LABELS OF DATA OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Course')) infoeLevelTemp.append(T('Total inserted')) infoeLevelTemp.append(T('Total modified')) infoeLevelTemp.append(T('Total out')) infoLevel.append(infoeLevelTemp) for project in db((db.user_project.assigned_user==auth.user.id)&((db.user_project.period <= period.id) & ((db.user_project.period + db.user_project.periods) > period.id))).select(): infoeLevelTemp = [] #NAME OF PERIOD infoeLevelTemp.append(project.project.name) #COUNTS if personal_query == '': infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.project.name)&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)).count()) infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.project.name)&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)).count()) infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.project.name)&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)).count()) else: infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.project.name)&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) 
infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.project.name)&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.project.name)&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) #INSERT PROJECT infoLevel.append(infoeLevelTemp) #PER MONTH elif str(request.vars['level'])=="2": #PROJECT OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Course')) infoeLevelTemp.append(project.name) infoLevel.append(infoeLevelTemp) #MIDDLE LINE OF REPORT infoeLevelTemp=[] infoLevel.append(infoeLevelTemp) #LABLE DETAIL OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Detail')) infoLevel.append(infoeLevelTemp) #LABELS OF DATA OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Month')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(T('Total inserted')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(T('Total modified')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(T('Total out')) infoLevel.append(infoeLevelTemp) for month in GET_MONTH_PERIOD(): start = datetime.strptime(str(period.yearp) + '-' + str(month[0]) +'-01', "%Y-%m-%d") if month[2]==1: end = datetime.strptime(str(period.yearp+1) + '-' + str(month[2]) +'-01', "%Y-%m-%d") else: end = datetime.strptime(str(period.yearp) + '-' + str(month[2]) +'-01', "%Y-%m-%d") infoeLevelTemp = [] #NAME OF MONTH 
infoeLevelTemp.append(month[1]+' '+str(period.yearp)) #COUNTS if personal_query == '': if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(start))&(db.validate_laboratory_log.date_log<str(end))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(start))&(db.validate_laboratory_log.date_log<str(end))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(start))&(db.validate_laboratory_log.date_log<str(end))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)).count()) else: if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": 
infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(start))&(db.validate_laboratory_log.date_log<str(end))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(start))&(db.validate_laboratory_log.date_log<str(end))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(start))&(db.validate_laboratory_log.date_log<str(end))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) #INSERT MONTH infoLevel.append(infoeLevelTemp) #PER ROL elif str(request.vars['level'])=="3": #PROJECT OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Course')) infoeLevelTemp.append(project.name) infoLevel.append(infoeLevelTemp) #MONTH OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Month')) infoeLevelTemp.append(month[0]) infoLevel.append(infoeLevelTemp) #MIDDLE LINE OF REPORT infoeLevelTemp=[] infoLevel.append(infoeLevelTemp) 
#LABLE DETAIL OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Detail')) infoLevel.append(infoeLevelTemp) #LABELS OF DATA OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Role')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(T('Total inserted')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(T('Total modified')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(T('Total out')) infoLevel.append(infoeLevelTemp) for rollT in roles: roll=db(db.auth_group.role==rollT).select().first() if roll is None: roll=db(db.validate_laboratory_log.roll==rollT).select().first() roll=roll.roll else: roll=roll.role infoeLevelTemp = [] #NAME OF ROLE infoeLevelTemp.append(T('Rol '+roll)) #COUNTS if personal_query == '': if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": 
infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)).count()) else: if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": 
infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) #INSERT ROLL infoLevel.append(infoeLevelTemp) #PER USER elif str(request.vars['level'])=="4": #PROJECT OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Course')) infoeLevelTemp.append(project.name) infoLevel.append(infoeLevelTemp) #MONTH OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Month')) infoeLevelTemp.append(month[0]) infoLevel.append(infoeLevelTemp) #ROLE OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Role')) infoeLevelTemp.append(T('Rol '+roll)) infoLevel.append(infoeLevelTemp) #MIDDLE LINE OF REPORT infoeLevelTemp=[] infoLevel.append(infoeLevelTemp) #LABLE DETAIL OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Detail')) infoLevel.append(infoeLevelTemp) #LABELS OF DATA OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('User')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(T('Total inserted')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(T('Total modified')) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(T('Total out')) infoLevel.append(infoeLevelTemp) for userPT in usersProject: userP=db(db.auth_user.username==userPT).select().first() if userP is None: userP=db(db.validate_laboratory_log.user_name==userPT).select().first() userP=userP.user_name else: userP=userP.username infoeLevelTemp = [] #NAME OF USER infoeLevelTemp.append(userP) #COUNTS if personal_query == '': if 
str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)).count()) else: if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="i": 
infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="u": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) if str(request.vars['type_L'])=="all" or str(request.vars['type_L'])=="d": infoeLevelTemp.append(db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).count()) #INSERT USER infoLevel.append(infoeLevelTemp) #DATA elif str(request.vars['level'])=="5": #PROJECT OF REPORT infoeLevelTemp=[] 
infoeLevelTemp.append(T('Course')) infoeLevelTemp.append(project.name) infoLevel.append(infoeLevelTemp) #MONTH OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Month')) infoeLevelTemp.append(month[0]) infoLevel.append(infoeLevelTemp) #ROLE OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Role')) infoeLevelTemp.append(T('Rol '+roll)) infoLevel.append(infoeLevelTemp) #ROLE OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('User')) infoeLevelTemp.append(userP) infoLevel.append(infoeLevelTemp) #MIDDLE LINE OF REPORT infoeLevelTemp=[] infoLevel.append(infoeLevelTemp) #LABLE DETAIL OF REPORT infoeLevelTemp=[] infoeLevelTemp.append(T('Detail')) infoLevel.append(infoeLevelTemp) #COUNTS if personal_query == '': if str(request.vars['type_L'])=="all": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.validation_type==False)).select() elif str(request.vars['type_L'])=="i": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)).select() elif str(request.vars['type_L'])=="u": allData = 
db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)).select() elif str(request.vars['type_L'])=="d": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)).select() else: if str(request.vars['type_L'])=="all": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).select() elif str(request.vars['type_L'])=="i": allData = 
db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='insert')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).select() elif str(request.vars['type_L'])=="u": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='update')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).select() elif str(request.vars['type_L'])=="d": allData = db((db.validate_laboratory_log.period==T(period.period.name))&(db.validate_laboratory_log.yearp==period.yearp)&(db.validate_laboratory_log.project==project.name)&(db.validate_laboratory_log.date_log>=str(month[1]))&(db.validate_laboratory_log.date_log<str(month[2]))&(db.validate_laboratory_log.roll==str(roll))&(db.validate_laboratory_log.user_name==str(userP))&(db.validate_laboratory_log.operation_log=='delete')&(db.validate_laboratory_log.validation_type==False)&(db.validate_laboratory_log.academic.like('%'+str(personal_query)+'%'))).select() #TITLE OF TABLE infoeLevelTemp=[] infoeLevelTemp.append(T('Operation')) infoeLevelTemp.append(T('Academic')) infoeLevelTemp.append(T('Grade Before')) infoeLevelTemp.append(T('Grade After')) infoeLevelTemp.append(T('Date')) 
infoeLevelTemp.append(T('Description')) infoLevel.append(infoeLevelTemp) for operation in allData: infoeLevelTemp=[] infoeLevelTemp.append(operation.operation_log) infoeLevelTemp.append(operation.academic) infoeLevelTemp.append(operation.before_grade) infoeLevelTemp.append(operation.after_grade) infoeLevelTemp.append(operation.date_log) infoeLevelTemp.append(operation.description) return dict(filename='ReporteGestionEquivalencia', csvdata=infoLevel) @auth.requires_login() @auth.requires(auth.has_membership('Teacher')) def laboratory_replacing_management(): #**************************************************************************************************************** #**************************************************************************************************************** #***************************************CHECK IF THERE IS A PERSONALIZED QUERY*********************************** import cpfecys period = cpfecys.current_year_period() from datetime import datetime infoLevel = [] personal_query = '' makeRedirect = False project=None month=None roll=None userP=None grid=None if request.vars['querySearch'] is not None and str(request.vars['querySearch']) != "": #PERSONALIZED QUERY SURE WORK try: personal_query = int(request.vars['querySearch']) countI = db(db.validate_laboratory_log.academic==personal_query).count() if request.vars['searchT'] is not None and str(request.vars['searchT']) == 'T': makeRedirect = True except: response.flash = T('The query is not valid. 
The report is displayed without applying any query.') personal_query = '' if makeRedirect == True: redirect(URL('teacher_reports', 'laboratory_replacing_management',vars=dict(level = 5, project = str(request.vars['project']), month = str(request.vars['month']), roll = str(request.vars['roll']), userP = str(request.vars['userP']), type_L=request.vars['type_L'], type_U=request.vars['type_U'], querySearch=request.vars['querySearch']))) #**************************************************************************************************************** #**************************************************************************************************************** #******************************************VERIFY THAT ACCURATE PARAMETERS*************************************** try: #CHECK THAT THE LEVEL OF REPORT IS VALID if request.vars['level'] is not None and (int(request.vars['level'])<1 or int(request.vars['level'])>5): session.flash = T('Not valid Action.') redirect(URL('default','index'))
mluv.dudy.rolling(t=nperseg, center=True) .reduce(np.average, weights=sig.hann(nperseg)) .dropna("t") ) dvdys = ( mluv.dvdy.rolling(t=nperseg, center=True) .reduce(np.average, weights=sig.hann(nperseg)) .dropna("t") ) sstrains = ( mluv.sstrain.rolling(t=nperseg, center=True) .reduce(np.average, weights=sig.hann(nperseg)) .dropna("t") ) nstrains = ( mluv.nstrain.rolling(t=nperseg, center=True) .reduce(np.average, weights=sig.hann(nperseg)) .dropna("t") ) divs = ( mluv.div.rolling(t=nperseg, center=True) .reduce(np.average, weights=sig.hann(nperseg)) .dropna("t") ) vorts = ( mluv.vort.rolling(t=nperseg, center=True) .reduce(np.average, weights=sig.hann(nperseg)) .dropna("t") ) dudzs = ( mluv.dudz.isel(index=0) .rolling(t=nperseg, center=True) .reduce(np.average, weights=sig.hann(nperseg)) .dropna("t") ) dvdzs = ( mluv.dvdz.isel(index=0) .rolling(t=nperseg, center=True) .reduce(np.average, weights=sig.hann(nperseg)) .dropna("t") ) # Make spline fits. fdudx = itpl.RectBivariateSpline(dudxs.t.data, -dudxs.z.data, dudxs.data) fdvdx = itpl.RectBivariateSpline(dvdxs.t.data, -dvdxs.z.data, dvdxs.data) fdudy = itpl.RectBivariateSpline(dudys.t.data, -dudys.z.data, dudys.data) fdvdy = itpl.RectBivariateSpline(dvdys.t.data, -dvdys.z.data, dvdys.data) fsstrain = itpl.RectBivariateSpline(sstrains.t.data, -sstrains.z.data, sstrains.data) fnstrain = itpl.RectBivariateSpline(nstrains.t.data, -nstrains.z.data, nstrains.data) fdiv = itpl.RectBivariateSpline(divs.t.data, -divs.z.data, divs.data) fvort = itpl.RectBivariateSpline(vorts.t.data, -vorts.z.data, vorts.data) fdudz = itpl.RectBivariateSpline(dudzs.t.data, -dudzs.z.data, dudzs.data) fdvdz = itpl.RectBivariateSpline(dvdzs.t.data, -dvdzs.z.data, dvdzs.data) # Interpolate using splines. 
# Evaluate each spline at the float trajectory points (t, z); grid=False
# returns point-wise values rather than a 2-D grid.
dudxt = fdudx(c4["t"], -c4["z"], grid=False)
dvdxt = fdvdx(c4["t"], -c4["z"], grid=False)
dudyt = fdudy(c4["t"], -c4["z"], grid=False)
dvdyt = fdvdy(c4["t"], -c4["z"], grid=False)
sstraint = fsstrain(c4["t"], -c4["z"], grid=False)
nstraint = fnstrain(c4["t"], -c4["z"], grid=False)
divt = fdiv(c4["t"], -c4["z"], grid=False)
vortt = fvort(c4["t"], -c4["z"], grid=False)
dudzt = fdudz(c4["t"], -c4["z"], grid=False)
dvdzt = fdvdz(c4["t"], -c4["z"], grid=False)
c4["dudxt"] = dudxt
c4["dvdxt"] = dvdxt
c4["dudyt"] = dudyt
c4["dvdyt"] = dvdyt
c4["sstraint"] = sstraint
c4["nstraint"] = nstraint
c4["divt"] = divt
c4["vortt"] = vortt
c4["dudzt"] = dudzt
c4["dvdzt"] = dvdzt

# %%

# %%
########################## SAVE CORRECTED FILES ##########################
io.savemat("../data/virtual_mooring_interpolated.mat", c4)
io.savemat("../data/virtual_mooring_interpolated_windowed.mat", c4w)

# %% [markdown]
# Signal to noise ratios.

# %%
print("Estimating signal to noise ratios.")
M = munch.munchify(utils.loadmat('../data/virtual_mooring_interpolated.mat'))

# SNR = var(raw signal) / var(raw - smoothed-spline estimate), per depth level.
# shear strain
dsstrain = M.sstrain - M.sstraint
SNR_sstrain = M.sstrain.var(axis=0)/dsstrain.var(axis=0)
np.save('../data/SNR_sstrain', SNR_sstrain, allow_pickle=False)
# normal strain
dnstrain = M.nstrain - M.nstraint
SNR_nstrain = M.nstrain.var(axis=0)/dnstrain.var(axis=0)
np.save('../data/SNR_nstrain', SNR_nstrain, allow_pickle=False)
# zonal shear
ddudz = M.dudz - M.dudzt
# BUG FIX: numerator was ``M.dvdz.var`` (copy-paste from the meridional case);
# the zonal SNR must use the zonal shear variance.
SNR_dudz = M.dudz.var(axis=0)/ddudz.var(axis=0)
np.save('../data/SNR_dudz', SNR_dudz, allow_pickle=False)
# meridional shear
ddvdz = M.dvdz - M.dvdzt
SNR_dvdz = M.dvdz.var(axis=0)/ddvdz.var(axis=0)
np.save('../data/SNR_dvdz', SNR_dvdz, allow_pickle=False)
# divergence
ddiv = M.div - M.divt
# BUG FIX: this result was assigned to ``SNR_nstrain``, silently clobbering
# the normal-strain SNR above; the saved file is SNR_div, so name it that.
SNR_div = M.div.var(axis=0)/ddiv.var(axis=0)
np.save('../data/SNR_div', SNR_div, allow_pickle=False)

# %% [markdown]
# <a id="corrected"></a>

# %% [markdown]
# ## Generate interpolated data.
#
# Set parameters again.

# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
t_start = 734494.0
# Length of time series
max_len = N_data = 42048
# Data file
raw_data_file = "moorings.mat"
# Index where NaNs start in u and v data from SW mooring
sw_vel_nans = 14027
# Sampling period (minutes)
dt_min = 15.0
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 9
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiple N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0  # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# When bandpass filtering windowed data use these params multiplied by f and N
filtlo = 0.9  # times f
filthi = 1.1  # times N
# Interpolation distance that raises flag (m)
zimax = 100.0

dt_sec = dt_min * 60.0  # Sample period in seconds.
dt_day = dt_sec / 86400.0  # Sample period in days.
N_per_day = int(1.0 / dt_day)  # Samples per day.

# %% [markdown]
# Polynomial fits first.

# %%
print("REAL MOORING INTERPOLATION")
print("**Generating corrected data**")
moorings = load_data.load_my_data()
cc, nw, ne, se, sw = moorings

# Generate corrected moorings: pool all moorings into flat arrays so one
# depth polynomial can be fit across the whole array.
T = np.concatenate([m["T"].flatten() for m in moorings])
S = np.concatenate([m["S"].flatten() for m in moorings])
z = np.concatenate([m["z"].flatten() for m in moorings])
u = np.concatenate([m["u"].flatten() for m in moorings])
v = np.concatenate([m["v"].flatten() for m in moorings])
g = np.concatenate([m["gamman"].flatten() for m in moorings])

# SW problems...
nans = np.isnan(u) | np.isnan(v) print("Calculating polynomial coefficients.") pzT = np.polyfit(z[~nans], T[~nans], 3) pzS = np.polyfit(z[~nans], S[~nans], 3) pzg = np.polyfit(z[~nans], g[~nans], 3) pzu = np.polyfit(z[~nans], u[~nans], 2) pzv = np.polyfit(z[~nans], v[~nans], 2) # %% # Additional height in m to add to interpolation height. hoffset = [-25.0, 50.0, -50.0, 100.0] pi2 = np.pi * 2.0 nfft = nperseg levis = [(0, 1, 2, 3), (4, 5), (6, 7, 8, 9), (10, 11)] Nclevels = len(levis) spec_kwargs = { "fs": 1.0 / dt_sec, "window": window, "nperseg": nperseg, "nfft": nfft, "detrend": detrend, "axis": 0, } idx1 = np.arange(nperseg, N_data, nperseg // 2) # Window end index idx0 = idx1 - nperseg # Window start index N_windows = len(idx0) # Initialise the place holder dictionaries. c12w = {"N_levels": 12} # Dictionary for raw, windowed data from central mooring c4w = {"N_levels": Nclevels} # Dictionary for processed, windowed data c4 = {"N_levels": Nclevels} # Dictionary for processed data # Dictionaries for raw, windowed data from outer moorings nw5w, ne5w, se5w, sw5w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"} moorings5w = [nw5w, ne5w, se5w, sw5w] # Dictionaries for processed, windowed data from outer moorings nw4w, ne4w, se4w, sw4w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"} moorings4w = [nw4w, ne4w, se4w, sw4w] # Initialised the arrays of windowed data varr = ["t", "z", "u", "v", "gamman", "S", "T", "P"] for var in varr: c12w[var] = np.zeros((nperseg, N_windows, cc["N_levels"])) var4 = [ "t", "z", "u", "v", "gamman", "dudx", "dvdx", "dudy", "dvdy", "dudz", "dvdz", "dgdz", "nstrain", "sstrain", "vort", "N2", ] for var in var4: c4w[var] = np.zeros((nperseg, N_windows, Nclevels)) for var in var4: c4[var] = np.zeros((N_windows, Nclevels)) # Initialised the arrays of windowed data for outer mooring varro = ["z", "u", "v"] for var in varro: for m5w in moorings5w: m5w[var] = np.zeros((nperseg, N_windows, 5)) var4o = ["z", "u", "v"] for var in var4o: 
for m4w in moorings4w: m4w[var] = np.zeros((nperseg, N_windows, Nclevels)) # for var in var4o: # for m4 in moorings4: # m4[var] = np.zeros((N_windows, 4)) # Window the raw data. for i in range(N_windows): idx = idx0[i] for var in varr: c12w[var][:, i, :] = cc[var][idx : idx + nperseg, :] for i in range(N_windows): idx = idx0[i] for var in varro: for m5w, m in zip(moorings5w, moorings[1:]): m5w[var][:, i, :] = m[var][idx : idx + nperseg, :] c4["interp_far_flag"] = np.full_like(c4["u"], False, dtype=bool) print("Interpolating properties.") # Do the interpolation for i in range(Nclevels): # THIS hoffset is important!!! c4["z"][:, i] = np.mean(c12w["z"][..., levis[i]], axis=(0, -1)) + hoffset[i] for j in range(N_windows): zr = c12w["z"][:, j, levis[i]] ur = c12w["u"][:, j, levis[i]] vr = c12w["v"][:, j, levis[i]] gr = c12w["gamman"][:, j, levis[i]] Sr = c12w["S"][:, j, levis[i]] Tr = c12w["T"][:, j, levis[i]] Pr = c12w["P"][:, j, levis[i]] zi = c4["z"][j, i] c4["interp_far_flag"][j, i] = np.any(np.min(np.abs(zr - zi), axis=-1) > zimax) c4w["z"][:, j, i] = np.mean(zr, axis=-1) c4w["t"][:, j, i] = c12w["t"][:, j, 0] c4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu) c4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv) c4w["gamman"][:, j, i] = moo.interp_quantity(zr, gr, zi, pzg) dudzr = np.gradient(ur, axis=-1) / np.gradient(zr, axis=-1) dvdzr = np.gradient(vr, axis=-1) / np.gradient(zr, axis=-1) dgdzr = np.gradient(gr, axis=-1) / np.gradient(zr, axis=-1) N2 = seawater.bfrq(Sr.T, Tr.T, Pr.T, cc["lat"])[0].T # Instead of mean, could moo.interp1d c4w["dudz"][:, j, i] = np.mean(dudzr, axis=-1) c4w["dvdz"][:, j, i] = np.mean(dvdzr, axis=-1) c4w["dgdz"][:, j, i] = np.mean(dgdzr, axis=-1) c4w["N2"][:, j, i] = np.mean(N2, axis=-1) for m5w, m4w in zip(moorings5w, moorings4w): if (m5w["id"] == "sw") & ( idx1[j] > sw_vel_nans ): # Skip this level because of NaNs zr = m5w["z"][:, j, (0, 1, 3, 4)] ur = m5w["u"][:, j, (0, 1, 3, 4)] vr = m5w["v"][:, j, (0, 1, 3, 4)] else: zr 
= m5w["z"][:, j, :] ur = m5w["u"][:, j, :] vr = m5w["v"][:, j, :] m4w["z"][:, j, i] = np.full((nperseg), zi) m4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu) m4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv) print("Filtering windowed data.") fcorcpd = np.abs(cc["f"]) * 86400 / pi2 Nmean = np.sqrt(np.average(c4w["N2"], weights=sig.hann(nperseg), axis=0)) varl = ["u", "v", "gamman"] for var in varl: c4w[var + "_hib"] = np.zeros_like(c4w[var]) c4w[var + "_lo"] = utils.butter_filter( c4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0 ) c4w[var + "_hi"] = c4w[var] - c4w[var + "_lo"] for i in range(Nclevels): for j in range(N_windows): Nmean_ = Nmean[j, i] * 86400 / pi2 for var in varl: c4w[var + "_hib"][:, j, i] = utils.butter_filter( c4w[var][:, j, i], (filtlo * fcorcpd, filthi * Nmean_), fs=N_per_day, btype="band", ) varl = ["u", "v"] for var in varl: for m4w in moorings4w: m4w[var + "_lo"] = utils.butter_filter( m4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0 ) m4w[var + "_hi"] = m4w[var] - m4w[var + "_lo"] c4w["zi"] = np.ones_like(c4w["z"]) * c4["z"] print("Calculating horizontal gradients.") # Calculate horizontal gradients for j in range(N_windows): ll = np.stack( ([m["lon"] for m in moorings[1:]], [m["lat"] for m in moorings[1:]]), axis=1 ) uv = np.stack( ( [m4w["u_lo"][:, j, :] for m4w in moorings4w], [m4w["v_lo"][:, j, :] for m4w in moorings4w], ), axis=1, ) dudx, dudy, dvdx, dvdy, vort, _ = moo.div_vort_4D(ll[:, 0], ll[:, 1], uv) nstrain = dudx - dvdy sstrain = dvdx + dudy c4w["dudx"][:, j, :] = dudx c4w["dudy"][:, j, :] = dudy c4w["dvdx"][:, j, :] = dvdx
can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. """ self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment] try: # Trigger any extra validation we need to do. try: self._validate_conn(conn) except (SocketTimeout, BaseSSLError) as e: self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) raise # _validate_conn() starts the connection to an HTTPS proxy # so we need to wrap errors with 'ProxyError' here too. except ( OSError, NewConnectionError, TimeoutError, BaseSSLError, CertificateError, SSLError, ) as e: new_e: Exception = e if isinstance(e, (BaseSSLError, CertificateError)): new_e = SSLError(e) if isinstance( new_e, (OSError, NewConnectionError, TimeoutError, SSLError) ) and (conn and conn._connecting_to_proxy): new_e = _wrap_proxy_error(new_e) raise new_e # conn.request() calls http.client.*.request, not the method in # urllib3.request. It also calls makefile (recv) on the socket. try: if chunked: conn.request_chunked(method, url, **httplib_request_kw) else: conn.request(method, url, **httplib_request_kw) # We are swallowing BrokenPipeError (errno.EPIPE) since the server is # legitimately able to close the connection after sending a valid response. # With this behaviour, the received response is still readable. 
except BrokenPipeError: pass except OSError as e: # MacOS/Linux # EPROTOTYPE is needed on macOS # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ if e.errno != errno.EPROTOTYPE: raise # Reset the timeout for the recv() on the socket read_timeout = timeout_obj.read_timeout if conn.sock: # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. Instead of catching # the exception and assuming all BadStatusLine exceptions are read # timeouts, check for a zero timeout before making the request. if read_timeout == 0: raise ReadTimeoutError( self, url, f"Read timed out. (read timeout={read_timeout})" ) conn.sock.settimeout(read_timeout) # Receive the response from the server try: httplib_response = conn.getresponse() except (BaseSSLError, OSError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise log.debug( '%s://%s:%s "%s %s %s" %s %s', self.scheme, self.host, self.port, method, url, # HTTP version conn._http_vsn_str, # type: ignore[attr-defined] httplib_response.status, httplib_response.length, ) try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 log.warning( "Failed to parse headers (url=%s): %s", self._absolute_url(url), hpe, exc_info=True, ) return httplib_response def _absolute_url(self, path: str) -> str: return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url def close(self) -> None: """ Close all pooled connections and disable the pool. """ if self.pool is None: return # Disable access to the pool old_pool, self.pool = self.pool, None try: while True: conn = old_pool.get(block=False) if conn: conn.close() except queue.Empty: pass # Done. def is_same_host(self, url: str) -> bool: """ Check if the given ``url`` is a member of the same host as this connection pool. 
""" if url.startswith("/"): return True # TODO: Add optional support for socket.gethostbyname checking. scheme, _, host, port, *_ = parse_url(url) scheme = scheme or "http" if host is not None: host = _normalize_host(host, scheme=scheme) # Use explicit default port for comparison when none is given if self.port and not port: port = port_by_scheme.get(scheme) elif not self.port and port == port_by_scheme.get(scheme): port = None return (scheme, host, port) == (self.scheme, self.host, self.port) def urlopen( # type: ignore[override] self, method: str, url: str, body: Optional[_TYPE_BODY] = None, headers: Optional[Mapping[str, str]] = None, retries: Optional[Union[Retry, bool, int]] = None, redirect: bool = True, assert_same_host: bool = True, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, pool_timeout: Optional[int] = None, release_conn: Optional[bool] = None, chunked: bool = False, body_pos: Optional[_TYPE_BODY_POSITION] = None, **response_kw: Any, ) -> BaseHTTPResponse: """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param url: The URL to perform the request on. :param body: Data to send in the request body, either :class:`str`, :class:`bytes`, an iterable of :class:`str`/:class:`bytes`, or a file-like object. :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. 
:param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When ``False``, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. 
Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ parsed_url = parse_url(url) destination_scheme = parsed_url.scheme if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) # Ensure that the URL we're connecting to is properly encoded if url.startswith("/"): url = to_str(_encode_target(url)) else: url = to_str(parsed_url.url) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] <https://github.com/urllib3/urllib3/issues/651> release_this_conn = release_conn http_tunnel_required = connection_requires_http_tunnel( self.proxy, self.proxy_config, destination_scheme ) # Merge the proxy headers. Only done when not using HTTP CONNECT. We # have to copy the headers dict so we can safely change it without those # changes being reflected in anyone else's copy. if not http_tunnel_required: headers = headers.copy() # type: ignore[attr-defined] headers.update(self.proxy_headers) # type: ignore[union-attr] # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. 
err = None # Keep track of whether we cleanly exited the except block.
<filename>refine_code/GUI_hm_dark_style.py import cv2 import sys import os.path as osp import time from matplotlib import pyplot as plt import numpy as np from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES; import os from pyclustering.cluster import cluster_visualizer; from pyclustering.cluster.kmedoids import kmedoids; import pickle from pyclustering.utils import read_sample; from pyclustering.utils import timedcall; import functools import transfer_lab import copy import math import qdarkstyle from PyQt5.QtWidgets import QApplication,QScrollArea,QColorDialog, QWidget,QMainWindow,QLabel, QPushButton,QFormLayout,QMessageBox,QLineEdit,QAction, QFileDialog,QVBoxLayout,QHBoxLayout,QGroupBox,QFrame from PyQt5.QtGui import QIcon,QImage,QPixmap,QPalette,QColor from PyQt5.QtCore import pyqtSlot,Qt, QThread,pyqtSignal class ColorChangeHue(QMainWindow): def __init__(self, parent_win, dom_colors): super().__init__() self.title = 'Interactively change the color by hue matching' self.left = 50 self.top = 50 self.width = 480 self.height = 150 self.setWindowTitle(self.title) self.setGeometry(self.left, self.top, self.width, self.height) self.wid = QWidget(self) self.setCentralWidget(self.wid) self.mainlayout = QVBoxLayout() self.wid.setLayout(self.mainlayout) print(dom_colors) self.h_dom_colors = [] for dc in dom_colors: self.h_dom_colors.append(transfer_lab.rgb2hue_opencv(transfer_lab.lab2rgb_opencv(dc))[0]*2) self.parent_win = parent_win self.add_hue_matchings() self.source_hue_idx=None self.target_hue_idx=None self.src_tar_mp = {} self.setStyleSheet("QLabel{background:white;}" "QLabel{color:rgb(100,100,100,250);font-size:15px;font-weight:bold;font-family:Roman times;}" "QLabel:hover{color:rgb(100,100,100,120);}") def add_hue_matchings(self): self.top_hue_layout = QHBoxLayout() self.mid_hue_layout = QHBoxLayout() self.bottom_hue_layout = QHBoxLayout() self.mainlayout.addLayout(self.top_hue_layout) self.mainlayout.addLayout(self.mid_hue_layout) 
self.mainlayout.addLayout(self.bottom_hue_layout) bin_size=10 self.binsz = bin_size for idx, w in enumerate(range(0,360,bin_size)): mid_h = int((idx + 0.5) * bin_size) if mid_h >= 360: break q = QLabel() q.setAutoFillBackground(True) q.setText(" ") q.setMargin(5) p = q.palette() ss = False for hu_range in range(int(mid_h-bin_size*0.5), int(mid_h+bin_size*0.5)): if hu_range in self.h_dom_colors: ss=True if ss: # p.setColor(q.backgroundRole(),QColor.fromHsl(mid_h,255,128)) q.setStyleSheet( "background-color: %s" % (QColor.fromHsl(mid_h,255,128).name())) else: # p.setColor(q.backgroundRole(), QColor.fromHsl(mid_h, 255, 128,alpha=0)) q.setStyleSheet( "background-color: %s" % (QColor.fromHsl(mid_h, 255, 128,alpha=0).name(QColor.HexArgb))) q.setPalette(p) q.mousePressEvent = functools.partial(self.choose_source_hue, source_object=q, index=idx) self.top_hue_layout.addWidget(q) for idx, w in enumerate(range(0,360,bin_size)): if int((idx + 0.5) * bin_size) >= 360: break q = QLabel() q.setAutoFillBackground(True) q.setText(" ") q.setMargin(5) p = q.palette() # p.setColor(q.backgroundRole(),QColor.fromHsl(int((idx+0.5)*bin_size), 255, 128, alpha=0)) q.setPalette(p) qc = QColor.fromHsl(int((idx + 0.5) * bin_size), 255, 128, alpha=0) q.setStyleSheet("background-color: %s" % (qc.name(QColor.HexArgb))) self.mid_hue_layout.addWidget(q) for idx, w in enumerate(range(0,360,bin_size)): if int((idx + 0.5) * bin_size) >= 360: break q = QLabel() q.setAutoFillBackground(True) q.setMargin(5) p = q.palette() p.setColor(q.backgroundRole(),QColor.fromHsl(int((idx+0.5)*bin_size),255,128)) q.setPalette(p) q.setStyleSheet("background-color: %s" % (QColor.fromHsl(int((idx + 0.5) * bin_size), 255, 128).name())) q.mousePressEvent = functools.partial(self.choose_target_hue, source_object=q, index=idx) self.bottom_hue_layout.addWidget(q) def choose_source_hue(self,event,source_object=None,index=None): self.source_hue_idx = index pass def choose_target_hue(self, event,source_object=None,index=None): 
self.target_hue_idx = index if self.source_hue_idx is not None: q = self.mid_hue_layout.itemAt(self.source_hue_idx).widget() p = q.palette() p.setColor(q.backgroundRole(), QColor.fromHsl(int((self.target_hue_idx + 0.5) * self.binsz), 255, 128, alpha=255)) q.setPalette(p) qc = QColor.fromHsl(int((self.target_hue_idx + 0.5) * self.binsz), 255, 128, alpha=255) q.setStyleSheet("background-color: %s" % (qc.name(QColor.HexArgb))) self.src_tar_mp[self.source_hue_idx] = self.target_hue_idx #更新目标颜色 并刷新结果 self.parent_win.update_target_colors(self.src_tar_mp,self.binsz) pass class App(QMainWindow): def __init__(self): super().__init__() self.title = 'Translucent Image Recoloring through Homography Estimation' self.left = 10 self.top = 10 self.width = 640 self.height = 480 self.initUI() self._dom_colors = None def initUI(self): self.setWindowTitle(self.title) self.setGeometry(self.left, self.top, self.width, self.height) self.wid = QWidget(self) self.setCentralWidget(self.wid) self.mainLayout = QHBoxLayout() self.leftLayout = QVBoxLayout() self.rightLayout = QVBoxLayout() self.formlayout = QFormLayout() self.rightLayout.addLayout(self.formlayout) self.clayout = QHBoxLayout() self.dom_color_layout = QVBoxLayout() self.tar_color_layout = QVBoxLayout() self.clayout.addLayout(self.dom_color_layout) self.clayout.addLayout(self.tar_color_layout) self.rightLayout.addLayout(self.clayout) self.rightLayout.addStretch(1) self.scroll = QScrollArea() self.scroll.setLayout(self.rightLayout) self.scroll.setWidgetResizable(True) self.scroll.setFixedHeight(600) self.scroll.setFrameStyle(QFrame.NoFrame) self.rr_new = QVBoxLayout() self.rr_new.addWidget(self.scroll) self.wid.setLayout(self.mainLayout) self.imageView = QLabel("waiting to read image") qb = QGroupBox("Image view") qb.setLayout(self.leftLayout) self.leftLayout.addWidget(self.imageView) qb2 = QGroupBox("Color") qb2.setLayout(self.rr_new) self.mainLayout.addWidget(qb, 2) self.mainLayout.addWidget(qb2, 1) # 
self.mainLayout.addLayout(self.rightLayout,1) # self.mainLayout.addLayout(self.rr_new, 1) # button = QPushButton("button") # button.setToolTip("this is an example.") # button.move(100, 70) # button.clicked.connect(self.on_click) # self.leftLayout.addWidget(button) # self.textbox = QLineEdit() # self.textbox.move(20,20) # self.textbox.resize(280,40) # self.textbox.setText("8") # self.formlayout.addRow(QLabel("Cluster Number:"),self.textbox) mainmenu = self.menuBar() filemenu = mainmenu.addMenu("File") imagemenu = mainmenu.addMenu("Image") helpmenu = mainmenu.addMenu("Help") openImageButton = QAction('Open', self) openImageButton.setShortcut('Ctrl+O') openImageButton.setToolTip("Open Image") openImageButton.triggered.connect(self.open_image) filemenu.addAction(openImageButton) exitButton = QAction('Exit',self) exitButton.setShortcut('Ctrl+Q') exitButton.setStatusTip("Exit app") exitButton.triggered.connect(self.close) filemenu.addAction(exitButton) self.add_act_short(imagemenu, "Find Dom Colors","Ctrl+F","FDC",self.find_dom_colors) self.add_act_short(imagemenu, "Change Color by Hue","Ctrl+H","CCH",self.change_color_by_hue) self.add_act_short(imagemenu, "Save result","Ctrl+S","Save the debuged_result",self.save_image_domc_tarc) self.add_act_short(helpmenu, "About","","About",self.about_software) self.show() def add_act_short(self,parent_menu,name,shortcut,tooltip,triggered_func): tempAction = QAction(name,self) tempAction.setShortcut(shortcut) tempAction.setToolTip(tooltip) tempAction.triggered.connect(triggered_func) parent_menu.addAction(tempAction) def clear_layout(self,lay_out): for i in reversed(range(lay_out.count())): lay_out.itemAt(i).widget().setParent(None) def layout_widgets(self,layout): return (layout.itemAt(i) for i in range(layout.count())) @pyqtSlot() def save_image_domc_tarc(self): # save resulted Image 带时间戳 # save domc image File & pickle File # save tarc image File & pickle File base_dir = osp.abspath(__file__) tre_dir = 
osp.join(osp.dirname(base_dir), "debug_result") file_name_w_ext = osp.basename(self.fileName) file_name, file_ext = osp.splitext(file_name_w_ext) if not osp.exists(osp.join(tre_dir,file_name)): os.mkdir(osp.join(tre_dir,file_name)) re_dir = osp.join(tre_dir,file_name) time_str = str(int(time.time())) source_image_name = file_name+time_str+"source"+time_str+file_ext result_image_name = file_name+time_str+"result_"+time_str+file_ext dom_pickle_name = file_name+time_str+"dom_pickle_"+time_str domc_file_name = file_name+time_str+"dom_color_"+time_str+file_ext tar_pickle_name = file_name+time_str+"tar_pickle_"+time_str tarc_file_name = file_name+time_str+"tar_color_"+time_str+file_ext print(osp.join(re_dir,result_image_name)) cv2.imwrite(osp.join(re_dir,result_image_name), cv2.cvtColor(self.reimage_rgb,cv2.COLOR_RGB2BGR)) pickle.dump(self._dom_colors,open(osp.join(re_dir,dom_pickle_name),"wb")) pickle.dump(self._tar_colors,open(osp.join(re_dir,tar_pickle_name),"wb")) dom_ccs = self._dom_colors[:-1] tar_ccs = self._tar_colors[:-1] image_width = 500 image_w_sep = 0.03 image_h_t_b = 0.03 lc_w = (1 - image_w_sep * (len(dom_ccs) + 1)) / len(dom_ccs) * image_width image_height = int(lc_w*(1+image_h_t_b*4)) dom_ccs_img = np.zeros((image_height,image_width,3)) tar_ccs_img = np.zeros((image_height,image_width,3)) dom_ccs_img[:,:,:] = 255 tar_ccs_img[:,:,:] = 255 for ix,(dc, tc) in enumerate(zip(dom_ccs,tar_ccs)): dc_rgb = transfer_lab.lab2rgb_opencv(dc) tc_rgb = transfer_lab.lab2rgb_opencv(tc) top_h = int(image_h_t_b*image_height) bottom_h = int(image_height - image_h_t_b*image_height*4) c_w = (1-image_w_sep*(len(dom_ccs)+1))/len(dom_ccs)*image_width left_w = int((ix+1)*(image_width*image_w_sep) + ix*c_w) right_w = int(left_w+c_w) dom_ccs_img[top_h*3:bottom_h,left_w:right_w] = dc_rgb tar_ccs_img[top_h*3:bottom_h,left_w:right_w] = tc_rgb print(dc != tc) print(dc,tc) if any(dc != tc): tar_ccs_img[-top_h*3:-top_h, left_w:right_w] = tc_rgb 
cv2.imwrite(osp.join(re_dir,domc_file_name),cv2.cvtColor(dom_ccs_img.astype(np.uint8),cv2.COLOR_RGB2BGR)) cv2.imwrite(osp.join(re_dir,tarc_file_name),cv2.cvtColor(tar_ccs_img.astype(np.uint8),cv2.COLOR_RGB2BGR)) pass @pyqtSlot() def about_software(self): pass @pyqtSlot() def change_color_by_hue(self): print("change colro by hue") self.change_color_by_hue_win.show() @pyqtSlot() def find_dom_colors(self): print(self.dom_color_layout.count()) self.fdm_thread = FindDomColorsThread(self.cvImage) self.fdm_thread.dom_colors.connect(self.update_dom_colors) self.fdm_thread.start() def update_dom_colors(self,dom_colors_): print("update dom_colors",dom_colors_) self._dom_colors = dom_colors_ self._tar_colors = copy.deepcopy(dom_colors_) self.change_color_by_hue_win = ColorChangeHue(self,dom_colors_) self.clear_layout(self.dom_color_layout) self.clear_layout(self.tar_color_layout) c_num = len(dom_colors_) for i in range(c_num): self.dom_color_layout.addWidget(QLabel("dom_c%s" % (i))) self.tar_color_layout.addWidget(QLabel("tar_c%s" % (i))) for idx, w in enumerate(self.layout_widgets(self.dom_color_layout)): w.widget().setAutoFillBackground(True) w.widget().setText(" ") w.widget().setMargin(15) p = w.widget().palette() r,g,b = transfer_lab.lab2rgb_opencv(dom_colors_[idx]) p.setColor(w.widget().backgroundRole(),QColor.fromRgb(r,g,b)) w.widget().setPalette(p) w.widget().setStyleSheet("background-color: %s" % (QColor.fromRgb(r, g, b).name())) for idx, w in enumerate(self.layout_widgets(self.tar_color_layout)): w.widget().setAutoFillBackground(True) w.widget().setText("") w.widget().setMargin(15) p = w.widget().palette() r,g,b = transfer_lab.lab2rgb_opencv(dom_colors_[idx]) p.setColor(w.widget().backgroundRole(),QColor.fromRgb(r,g,b)) w.widget().setPalette(p) w.widget().setStyleSheet("background-color: %s" % (QColor.fromRgb(r,g,b).name())) w.widget().mousePressEvent = functools.partial(self.choose_color, source_object=w.widget(),index=idx) def get_aff_23(self, color_array): srct_ab 
= [] for sc in color_array: srct_ab.append([sc[1], sc[2]]) return srct_ab def update_target_colors(self,src_tar_map,binsz): # 根据 hue范围进行变色 for k, v in src_tar_map.items(): for idx,tc in enumerate(self._dom_colors): hsl = transfer_lab.rgb2hue_opencv(transfer_lab.lab2rgb_opencv(tc)) h360 = hsl[0]*2 if h360 >= k*binsz and h360 <= (k+1)*binsz: tar_h360 = v*binsz delta = tar_h360-k*binsz h360 += delta hsl[0] = int(h360/2) self._tar_colors[idx] = transfer_lab.rgb2lab_opencv(transfer_lab.hue2rgb_opencv(hsl)) for idx, w in enumerate(self.layout_widgets(self.tar_color_layout)): p = w.widget().palette() r,g,b = transfer_lab.lab2rgb_opencv(self._tar_colors[idx]) p.setColor(w.widget().backgroundRole(),QColor.fromRgb(r,g,b)) w.widget().setPalette(p) w.widget().setStyleSheet("background-color: %s" % (QColor.fromRgb(r, g, b).name())) self.update_image() pass def update_image(self): """根据当前的颜色变化更新图片,如果无变化则不更新""" print(self._tar_colors) if any(self._dom_colors[-1] != np.array(transfer_lab.rgb2lab_opencv([255,255,255]))): self._dom_colors.append(transfer_lab.rgb2lab_opencv([255,255,255])) if any(self._tar_colors[-1] != np.array(transfer_lab.rgb2lab_opencv([255,255,255]))): self._tar_colors.append(transfer_lab.rgb2lab_opencv([255,255,255])) # if any(self._dom_colors[-1] != np.array(transfer_lab.rgb2lab_opencv([109, 109, 109]))): # self._dom_colors.append(transfer_lab.rgb2lab_opencv([109, 109, 109])) # # if any(self._tar_colors[-1] != np.array(transfer_lab.rgb2lab_opencv([109, 109, 109]))): # self._tar_colors.append(transfer_lab.rgb2lab_opencv([109, 109, 109])) # # if any(self._dom_colors[-1] != np.array(transfer_lab.rgb2lab_opencv([149, 149, 149]))): # self._dom_colors.append(transfer_lab.rgb2lab_opencv([149, 149, 149])) # # if any(self._tar_colors[-1] != np.array(transfer_lab.rgb2lab_opencv([149, 149, 149]))): # self._tar_colors.append(transfer_lab.rgb2lab_opencv([149, 149, 149])) src_ab = np.float32(self.get_aff_23(self._dom_colors)) tar_ab = 
np.float32(self.get_aff_23(self._tar_colors)) # ab 变化 M, status = cv2.findHomography(src_ab, tar_ab, method=cv2.RANSAC, ransacReprojThreshold=15) print("status",status.ravel().tolist()) # mask_status = status.ravel().tolist() tar_widgets = [] for w in (self.layout_widgets(self.tar_color_layout)): tar_widgets.append(w) for idx, ms in enumerate(mask_status): if idx == len(mask_status) - 1: continue if ms == 0: p = tar_widgets[idx].widget().palette() r,g,b = transfer_lab.lab2rgb_opencv(self._dom_colors[idx]) p.setColor(tar_widgets[idx].widget().backgroundRole(),QColor.fromRgb(r,g,b)) tar_widgets[idx].widget().setPalette(p) if M is None: return # M, inliers = cv2.estimateAffinePartial2D(src_ab, tar_ab, method=cv2.RANSAC) # M = cv2.getAffineTransform(src_ab, tar_ab) # M, status = cv2.findHomography(src_ab, tar_ab, method=cv2.LMEDScv2.LMEDS,ransacReprojThreshold=1000) print(M) mode = 2 img_labsave = cv2.cvtColor(self.cvImage,cv2.COLOR_BGR2LAB) hh, ww, _ = img_labsave.shape cc = 0 # print(img_labsave[300,300]) img_labsave2 = copy.deepcopy(img_labsave) img_labsave3 = copy.deepcopy(img_labsave) img_labsave2[:, :, 2] = 1 img_labsave2[:, :, 0], img_labsave2[:, :, 1] = img_labsave[:, :, 1], img_labsave[:, :, 2] img_labsave2 = img_labsave2.astype(np.float) img_labsave3 = img_labsave3.astype(np.float) # img_labsave2=np.reshape(img_labsave2,(-1,3)) re = np.dot(img_labsave2, M.T) re[:, :, 0] /= re[:, :, 2] re[:, :, 1] /= re[:, :, 2] def out_range(r,g,b): return r < 0 or r > 255 or g < 0 or g > 255 or b < 0 or b > 255 def scale_ab(v): sign = 1 if v < 0: sign = -1 # print(v,math.pow(sign*v,0.9)*sign) return math.sqrt(sign*v)*sign def non_linear_image(_lab_v): global is_out is_out = False _a = _lab_v[1] _b = _lab_v[2] r,g,b = transfer_lab.PURE_LAB2RGB(_lab_v) if out_range(r,g,b): is_out = True _a -= 128 _b -= 128 _a = scale_ab(_a) + 128 _b = scale_ab(_b) + 128 _lab_v[1] = _a _lab_v[2] = _b # print("HERE") return _lab_v # iter = 0 # global is_out is_out = False # re = 
np.apply_along_axis(non_linear_image, 2, re) # while iter <= 100 and is_out: # re = np.apply_along_axis(non_linear_image, 2, re) # iter += 1 # print("itere count", iter) img_labsave3[:, :, 1], img_labsave3[:, :, 2] = re[:, :, 0], re[:, :, 1] # TODO img_save3中 会存在超出可视范围的点。 img_labsave = img_labsave3.astype(np.uint8) self.reimage_rgb = cv2.cvtColor(img_labsave, cv2.COLOR_LAB2RGB) h, w, _bt = self.cvImage.shape btv = _bt * w self.mQImage = QImage(cv2.cvtColor(img_labsave, cv2.COLOR_LAB2RGB), w, h, btv, QImage.Format_RGB888) self.imageView.setPixmap(QPixmap.fromImage(self.mQImage)) self.imageView.show() # QMessageBox.question(self, 'Message', "图片优化计算完成" + str(is_out), QMessageBox.Yes, # QMessageBox.Yes) def choose_color(self, event, source_object=None,index=None): # print("clicked from", source_object) print("index",index) color = QColorDialog.getColor() if color.isValid(): # print(color.name())
# NOTE(review): the "<gh_stars>1-10" marker below is a scrape/dataset artifact,
# not valid Python — kept verbatim here; it should be deleted from the file.
<gh_stars>1-10
"""
The SQLAlchemy model definition.

revision history:
40 - Move image data to separate table for speed
39 - Remove SQL insert functions, add dataset row to frequencyband table. Add image data.
38 - add varmetric table
37 - add forcedfits_count column to runningcatalog
36 - switch to SQLAlchemy schema initialisation
"""
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Index,\
    Integer, SmallInteger, String, text, Sequence, LargeBinary
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION as Double

# Current schema revision; bump when the schema changes (see history above).
SCHEMA_VERSION = 40

Base = declarative_base()
metadata = Base.metadata


class Assocskyrgn(Base):
    """Association between a running-catalogue source and a sky region."""
    __tablename__ = 'assocskyrgn'
    id = Column(Integer, primary_key=True)

    runcat_id = Column('runcat', ForeignKey('runningcatalog.id'),
                       nullable=False, index=True)
    # Deleting a Runningcatalog row cascades to its sky-region associations.
    runcat = relationship('Runningcatalog',
                          backref=backref('assocskyrgns',
                                          cascade="all,delete"))

    skyrgn_id = Column('skyrgn', ForeignKey('skyregion.id'), nullable=False,
                       index=True)
    skyrgn = relationship('Skyregion')

    distance_deg = Column(Double)


class Assocxtrsource(Base):
    """Association between a running-catalogue source and an extracted source."""
    __tablename__ = 'assocxtrsource'
    # Each (runcat, xtrsrc) pair may be associated at most once.
    __table_args__ = (
        Index('assocxtrsource_runcat_xtrsrc_key', 'runcat', 'xtrsrc',
              unique=True),
    )

    id = Column(Integer, primary_key=True)

    runcat_id = Column('runcat', ForeignKey('runningcatalog.id'),
                       nullable=False)
    runcat = relationship('Runningcatalog')

    xtrsrc_id = Column('xtrsrc', ForeignKey('extractedsource.id'), index=True)
    xtrsrc = relationship('Extractedsource')

    type = Column(SmallInteger, nullable=False)
    distance_arcsec = Column(Double)
    r = Column(Double)
    loglr = Column(Double)
    # Variability metrics over the associated flux datapoints.
    v_int = Column(Double, nullable=False)
    eta_int = Column(Double, nullable=False)
    f_datapoints = Column(Integer, nullable=False)


class Config(Base):
    """Per-dataset pipeline configuration entry (section/key/value triple)."""
    __tablename__ = 'config'
    # A key may appear only once per (dataset, section).
    __table_args__ = (
        Index('config_dataset_section_key_key', 'dataset', 'section', 'key',
              unique=True),
    )

    id = Column(Integer, primary_key=True)

    dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False)
    dataset = relationship('Dataset',
                           backref=backref('configs', cascade="all,delete"))

    section = Column(String(100))
    key = Column(String(100))
    value = Column(String(500))
    # Stores the declared type of `value` (e.g. a short type tag).
    type = Column(String(5))


seq_dataset = Sequence('seq_dataset')


class Dataset(Base):
    """One pipeline run: processing parameters and timing for a dataset."""
    __tablename__ = 'dataset'

    # Explicit sequence so the id can also be generated server-side.
    id = Column(Integer, seq_dataset, server_default=seq_dataset.next_value(),
                primary_key=True)
    rerun = Column(Integer, nullable=False, server_default=text("0"))
    type = Column(SmallInteger, nullable=False, server_default=text("1"))
    process_start_ts = Column(DateTime, nullable=False)
    process_end_ts = Column(DateTime)
    detection_threshold = Column(Double)
    analysis_threshold = Column(Double)
    assoc_radius = Column(Double)
    backsize_x = Column(SmallInteger)
    backsize_y = Column(SmallInteger)
    margin_width = Column(Double)
    description = Column(String(100), nullable=False)
    node = Column(SmallInteger, nullable=False, server_default=text("1"))
    nodes = Column(SmallInteger, nullable=False, server_default=text("1"))


# extractedsource types (values stored in Extractedsource.extract_type)
BLIND_FIT = 0
FORCED_FIT = 1
MONITORED_FIT = 2


class Extractedsource(Base):
    """A single source measurement extracted from one image."""
    __tablename__ = 'extractedsource'

    id = Column(Integer, primary_key=True)

    image_id = Column('image', ForeignKey('image.id'), nullable=False,
                      index=True)
    image = relationship('Image',
                         backref=backref('extractedsources',
                                         cascade="all,delete"))

    # Set when this measurement is a forced fit at a known runcat position.
    ff_runcat_id = Column('ff_runcat', ForeignKey('runningcatalog.id'))
    # Explicit primaryjoin: Runningcatalog also references Extractedsource,
    # so SQLAlchemy cannot infer the join direction on its own.
    ff_runcat = relationship(
        'Runningcatalog',
        primaryjoin='Extractedsource.ff_runcat_id == Runningcatalog.id')

    ff_monitor_id = Column('ff_monitor', ForeignKey('monitor.id'))
    ff_monitor = relationship('Monitor')

    zone = Column(Integer, nullable=False)
    ra = Column(Double, nullable=False, index=True)
    decl = Column(Double, nullable=False, index=True)
    uncertainty_ew = Column(Double, nullable=False)
    uncertainty_ns = Column(Double, nullable=False)
    ra_err = Column(Double, nullable=False, index=True)
    decl_err = Column(Double, nullable=False, index=True)
    ra_fit_err = Column(Double, nullable=False)
    decl_fit_err = Column(Double, nullable=False)
    ew_sys_err = Column(Double, nullable=False)
    ns_sys_err = Column(Double, nullable=False)
    error_radius = Column(Double, nullable=False)
    # Cartesian unit-sphere coordinates derived from (ra, decl).
    x = Column(Double, nullable=False, index=True)
    y = Column(Double, nullable=False, index=True)
    z = Column(Double, nullable=False, index=True)
    racosdecl = Column(Double, nullable=False)
    margin = Column(Boolean, nullable=False, server_default=text("false"))
    det_sigma = Column(Double, nullable=False)
    semimajor = Column(Double)
    semiminor = Column(Double)
    pa = Column(Double)
    f_peak = Column(Double)
    f_peak_err = Column(Double)
    f_int = Column(Double)
    f_int_err = Column(Double)
    chisq = Column(Double)
    reduced_chisq = Column(Double)
    # One of BLIND_FIT / FORCED_FIT / MONITORED_FIT above.
    extract_type = Column(SmallInteger)
    fit_type = Column(SmallInteger)
    node = Column(SmallInteger, nullable=False, server_default=text("1"))
    nodes = Column(SmallInteger, nullable=False, server_default=text("1"))


seq_frequencyband = Sequence('seq_frequencyband')


class Frequencyband(Base):
    """A frequency band (central/low/high) belonging to a dataset."""
    __tablename__ = 'frequencyband'

    id = Column(Integer, seq_frequencyband, primary_key=True,
                server_default=seq_frequencyband.next_value())

    dataset_id = Column('dataset', Integer, ForeignKey('dataset.id'),
                        nullable=False, index=True)
    dataset = relationship('Dataset',
                           backref=backref('frequencybands',
                                           cascade="all,delete"))

    freq_central = Column(Double)
    freq_low = Column(Double)
    freq_high = Column(Double)


seq_image = Sequence('seq_image')


class Image(Base):
    """Metadata for one image; pixel/FITS payload lives in ImageData."""
    __tablename__ = 'image'

    id = Column(Integer, seq_image, primary_key=True,
                server_default=seq_image.next_value())

    dataset_id = Column('dataset', Integer, ForeignKey('dataset.id'),
                        nullable=False, index=True)
    dataset = relationship('Dataset',
                           backref=backref('images', cascade="delete"))

    band_id = Column('band', ForeignKey('frequencyband.id'), nullable=False,
                     index=True)
    band = relationship('Frequencyband', cascade="delete")

    skyrgn_id = Column('skyrgn', Integer, ForeignKey('skyregion.id'),
                       nullable=False, index=True)
    skyrgn = relationship('Skyregion',
                          backref=backref('images', cascade="delete"))

    tau = Column(Integer)
    stokes = Column(SmallInteger, nullable=False, server_default=text("1"))
    tau_time = Column(Double)
    freq_eff = Column(Double, nullable=False)
    freq_bw = Column(Double)
    taustart_ts = Column(DateTime, nullable=False, index=True)
    # Restoring-beam parameters.
    rb_smaj = Column(Double, nullable=False)
    rb_smin = Column(Double, nullable=False)
    rb_pa = Column(Double, nullable=False)
    deltax = Column(Double, nullable=False)
    deltay = Column(Double, nullable=False)
    fwhm_arcsec = Column(Double)
    fov_degrees = Column(Double)
    rms_qc = Column(Double, nullable=False)
    rms_min = Column(Double)
    rms_max = Column(Double)
    detection_thresh = Column(Double)
    analysis_thresh = Column(Double)
    url = Column(String(1024))
    node = Column(SmallInteger, nullable=False, server_default=text("1"))
    nodes = Column(SmallInteger, nullable=False, server_default=text("1"))

    # One-to-one link to the bulk image payload (schema revision 40).
    data = relationship("ImageData", uselist=False, back_populates="image")


class ImageData(Base):
    """Bulk FITS header/data for an Image, split out for query speed (rev 40)."""
    __tablename__ = 'imagedata'

    id = Column(Integer, primary_key=True)

    image_id = Column('image', Integer, ForeignKey('image.id'),
                      nullable=False, index=True)
    image = relationship("Image", back_populates="data")

    fits_header = Column(String)
    fits_data = Column(LargeBinary)


class Monitor(Base):
    """A user-specified position to be monitored with forced fits."""
    __tablename__ = 'monitor'

    id = Column(Integer, primary_key=True)

    dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False,
                        index=True)
    dataset = relationship('Dataset')

    runcat_id = Column('runcat', ForeignKey('runningcatalog.id'))
    runcat = relationship('Runningcatalog')

    ra = Column(Double, nullable=False)
    decl = Column(Double, nullable=False)
    name = Column(String(100))


class Newsource(Base):
    """A newly detected transient candidate and the image that triggered it."""
    __tablename__ = 'newsource'

    id = Column(Integer, primary_key=True)

    runcat_id = Column('runcat', ForeignKey('runningcatalog.id'),
                       nullable=False, index=True)
    runcat = relationship('Runningcatalog',
                          backref=backref("newsources", cascade="all,delete"))

    trigger_xtrsrc_id = Column('trigger_xtrsrc',
                               ForeignKey('extractedsource.id'),
                               nullable=False, index=True)
    trigger_xtrsrc = relationship('Extractedsource')

    # The most recent image in which the source was *not* detected.
    previous_limits_image_id = Column('previous_limits_image',
                                      ForeignKey('image.id'), nullable=False)
    previous_limits_image = relationship('Image')

    newsource_type = Column(SmallInteger, nullable=False)


class Node(Base):
    """Zone-partitioning bookkeeping for a (possibly sharded) database node."""
    __tablename__ = 'node'
    __table_args__ = (
        Index('node_node_zone_key', 'node', 'zone', unique=True),
    )

    id = Column(Integer, primary_key=True)

    node = Column(SmallInteger, nullable=False, server_default=text("1"))
    zone = Column(SmallInteger, nullable=False)
    zone_min = Column(SmallInteger)
    zone_max = Column(SmallInteger)
    zone_min_incl = Column(Boolean, server_default=text("true"))
    zone_max_incl = Column(Boolean, server_default=text("false"))
    zoneheight = Column(Double, server_default=text("1.0"))
    nodes = Column(SmallInteger, nullable=False, server_default=text("1"))


class Rejection(Base):
    """Records that an image was rejected, and why."""
    __tablename__ = 'rejection'

    id = Column(Integer, primary_key=True)

    image_id = Column('image', ForeignKey('image.id'), index=True)
    image = relationship('Image')

    # TO DO: Rename this column to 'rejectreason_id',
    # (rather than just 'rejectreason') so the model attribute matches
    # the SQL column name, avoiding the current confusing name-shadowing
    # between the SQL columns and the model attributes. (Issue #508)
    rejectreason_id = Column('rejectreason', ForeignKey('rejectreason.id'),
                             index=True)
    rejectreason = relationship('Rejectreason')

    comment = Column(String(512))


class Rejectreason(Base):
    """Lookup table of reasons an image can be rejected."""
    __tablename__ = 'rejectreason'

    id = Column(Integer, primary_key=True)
    description = Column(String(512))


class Runningcatalog(Base):
    """Aggregated (weighted-mean) source built up across many measurements."""
    __tablename__ = 'runningcatalog'

    id = Column(Integer, primary_key=True)

    # The first/representative extraction for this source.
    xtrsrc_id = Column('xtrsrc', ForeignKey('extractedsource.id'),
                       nullable=False, unique=True)
    # Explicit primaryjoin required: Extractedsource.ff_runcat also points
    # back at this table, making the FK direction ambiguous.
    xtrsrc = relationship(
        'Extractedsource',
        primaryjoin='Runningcatalog.xtrsrc_id == Extractedsource.id',
        backref=backref('extractedsources', cascade="all,delete"))

    dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False,
                        index=True)
    dataset = relationship('Dataset')

    datapoints = Column(Integer, nullable=False)
    zone = Column(Integer, nullable=False, index=True)
    # Weighted-mean position and its uncertainties.
    wm_ra = Column(Double, nullable=False, index=True)
    wm_decl = Column(Double, nullable=False, index=True)
    wm_uncertainty_ew = Column(Double, nullable=False, index=True)
    wm_uncertainty_ns = Column(Double, nullable=False, index=True)
    avg_ra_err = Column(Double, nullable=False)
    avg_decl_err = Column(Double, nullable=False)
    avg_wra = Column(Double, nullable=False)
    avg_wdecl = Column(Double, nullable=False)
    avg_weight_ra = Column(Double, nullable=False)
    avg_weight_decl = Column(Double, nullable=False)
    x = Column(Double, nullable=False, index=True)
    y = Column(Double, nullable=False, index=True)
    z = Column(Double, nullable=False, index=True)
    inactive = Column(Boolean, nullable=False, server_default=text("false"))
    mon_src = Column(Boolean, nullable=False, server_default=text("false"))
    # Added in schema revision 37.
    forcedfits_count = Column(Integer, server_default=text("0"))

    # Many-to-many to all associated extractions via assocxtrsource.
    extractedsources = relationship('Extractedsource',
                                    secondary='assocxtrsource',
                                    backref='runningcatalogs')

    # One-to-one variability metrics (schema revision 38).
    varmetric = relationship("Varmetric", uselist=False, backref="runcat",
                             cascade="all,delete")


class Varmetric(Base):
    """Denormalised variability metrics for one running-catalogue source."""
    __tablename__ = 'varmetric'

    id = Column(Integer, primary_key=True)

    runcat_id = Column('runcat', ForeignKey('runningcatalog.id'),
                       nullable=False, index=True, unique=True)

    v_int = Column(Double, index=True)
    eta_int = Column(Double)

    band_id = Column('band', ForeignKey('frequencyband.id'), nullable=False,
                     index=True)
    band = relationship('Frequencyband', cascade="delete")

    newsource = Column(Integer)
    sigma_rms_max = Column(Double, index=True)
    sigma_rms_min = Column(Double, index=True)
    lightcurve_max = Column(Double, index=True)
    lightcurve_avg = Column(Double, index=True)
    lightcurve_median = Column(Double, index=True)


class RunningcatalogFlux(Base):
    """Per-band, per-Stokes aggregated flux statistics for a runcat source."""
    __tablename__ = 'runningcatalog_flux'
    __table_args__ = (
        Index('runningcatalog_flux_runcat_band_stokes_key', 'runcat', 'band',
              'stokes', unique=True),
    )

    id = Column(Integer, primary_key=True)

    runcat_id = Column('runcat', ForeignKey('runningcatalog.id'),
                       nullable=False)
    runcat = relationship('Runningcatalog',
                          backref=backref('runningcatalogfluxs',
                                          cascade="all,delete"))

    band_id = Column('band', ForeignKey('frequencyband.id'), nullable=False,
                     index=True)
    band = relationship('Frequencyband', cascade="delete")

    stokes = Column(SmallInteger, nullable=False, server_default=text("1"))
    f_datapoints = Column(Integer, nullable=False)
    # Running averages used for incremental variability statistics.
    avg_f_peak = Column(Double)
    avg_f_peak_sq = Column(Double)
    avg_f_peak_weight = Column(Double)
    avg_weighted_f_peak = Column(Double)
    avg_weighted_f_peak_sq = Column(Double)
    avg_f_int = Column(Double)
    avg_f_int_sq = Column(Double)
    avg_f_int_weight = Column(Double)
    avg_weighted_f_int = Column(Double)
    avg_weighted_f_int_sq = Column(Double)


seq_skyregion = Sequence('seq_skyregion')


class Skyregion(Base):
    """A circular extraction region on the sky, owned by a dataset."""
    __tablename__ = 'skyregion'

    id = Column(Integer, seq_skyregion, primary_key=True,
                server_default=seq_skyregion.next_value())

    dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False,
                        index=True)
    dataset = relationship('Dataset',
                           backref=backref('skyregions',
                                           cascade="all,delete"))

    centre_ra = Column(Double, nullable=False)
    centre_decl = Column(Double, nullable=False)
    xtr_radius = Column(Double, nullable=False)
    x = Column(Double, nullable=False)
    y = Column(Double, nullable=False)
    z = Column(Double, nullable=False)


class Temprunningcatalog(Base):
    """Scratch table holding candidate runcat/extraction associations."""
    __tablename__ = 'temprunningcatalog'

    id = Column(Integer, primary_key=True)

    runcat_id = Column('runcat', ForeignKey('runningcatalog.id'),
                       nullable=False, index=True)
    runcat = relationship('Runningcatalog')

    xtrsrc_id = Column('xtrsrc', ForeignKey('extractedsource.id'),
                       nullable=False, index=True)
    xtrsrc = relationship('Extractedsource')

    dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False,
                        index=True)
    dataset = relationship('Dataset')

    band_id = Column('band', ForeignKey('frequencyband.id'), nullable=False,
                     index=True)
    band = relationship('Frequencyband', cascade="delete")

    distance_arcsec = Column(Double, nullable=False)
    r = Column(Double, nullable=False)
    stokes = Column(SmallInteger, nullable=False, server_default=text("1"))
    datapoints = Column(Integer, nullable=False)
    zone = Column(Integer, nullable=False)
    wm_ra = Column(Double, nullable=False)
    wm_decl = Column(Double, nullable=False)
    wm_uncertainty_ew = Column(Double, nullable=False)
    wm_uncertainty_ns = Column(Double, nullable=False)
    avg_ra_err = Column(Double, nullable=False)
    avg_decl_err = Column(Double, nullable=False)
    avg_wra = Column(Double, nullable=False)
    avg_wdecl = Column(Double, nullable=False)
    avg_weight_ra = Column(Double, nullable=False)
    avg_weight_decl = Column(Double, nullable=False)
    x = Column(Double, nullable=False)
    y = Column(Double, nullable=False)
    z = Column(Double, nullable=False)
    margin = Column(Boolean, nullable=False, server_default=text("false"))
    inactive = Column(Boolean, nullable=False, server_default=text("false"))
    beam_semimaj = Column(Double)
    beam_semimin = Column(Double)
    beam_pa = Column(Double)
    f_datapoints = Column(Integer)
    avg_f_peak = Column(Double)
    avg_f_peak_sq = Column(Double)
    avg_f_peak_weight = Column(Double)
    avg_weighted_f_peak = Column(Double)
    avg_weighted_f_peak_sq = Column(Double)
    avg_f_int =  # NOTE(review): source chunk is truncated mid-statement here;
                 # presumably continues `Column(Double)` plus the remaining
                 # avg_f_int_* columns (cf. RunningcatalogFlux) — confirm
                 # against the full file before applying.
# NOTE(review): this chunk opens with the tail of a model-initialisation
# function whose `def` line lies above the visible source; kept verbatim.
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=[0])
    return model


def init_and_load_roberta_classifier_model(model_dir, model_name):
    """Load a fine-tuned/pre-trained RoBERTa classifier from `model_dir`.

    Relies on module globals: `args`, `device`, `n_gpu`, `num_labels`,
    `NEW_CONFIG_NAME`, `my_logger`, `add_to_config`, `RobertaWrapper`.
    Wraps the model in apex DDP (distributed) or DataParallel (multi-GPU).
    """
    load_model_file = os.path.join(model_dir, model_name)
    load_config_file = os.path.join(model_dir, NEW_CONFIG_NAME)
    my_logger("Loading fine-tuned / pre-trained RoBERTa model %s..."%load_model_file)
    config = RobertaConfig(load_config_file)
    # Copies run-specific settings (e.g. ext_embeddings_type) onto the config.
    config = add_to_config(config, args)
    print("************ config *****************\n", config)
    model = RobertaWrapper(config, load_model_file, num_labels=num_labels, ext_embeddings_type=args.ext_embeddings_type)
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    return model


def init_random_model(model_dir):
    """Build a randomly initialised (untrained) BERT classifier.

    Same wrapping logic as the loaders above, but passes `None` weights so
    only the config is read from `model_dir`.
    """
    #TODO: change BERT_CONFIG_NAME to match the new config file type
    load_config_file = os.path.join(model_dir, BERT_CONFIG_NAME)
    my_logger("Initializing (random weights) BERT model...")
    config = BertConfig(load_config_file)
    config = add_to_config(config, args)
    model = BertWrapper(config, None, num_labels=num_labels, ext_embeddings_type=args.ext_embeddings_type)
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=[0])
    return model


def prepare_optimizer(model, lr = args.learning_rate):
    """Create a BertAdam optimizer with standard no-weight-decay groups.

    Biases and LayerNorm parameters get weight_decay=0.0; everything else
    0.01. Also resets the module-global `global_step` counter to 0.
    NOTE(review): `lr=args.learning_rate` is evaluated once at import time —
    later changes to `args.learning_rate` won't affect the default.
    """
    # Prepare optimizer
    global global_step
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=lr,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)
    global_step = 0
    return optimizer


def load_or_gen_features(data_type, examples):
    """Return a DataLoader for `examples`, using a feature cache on disk.

    Features are loaded from a cache file keyed on model type, split name,
    example counts etc.; otherwise they are generated via
    `convert_examples_to_features` and saved. Train split gets a random
    sampler (unless args.semi_random_train); eval splits are sequential.
    """
    # loads cached features or converts the features from examples.
    # Input:
    # examples: training or eval examples
    # data_type: 'train', 'eval' or 'mnli'
    assert data_type in ['train', 'dev', 'test', 'mnli'], "data_type must be either 'train' or 'dev' or 'mnli'"
    dataloader = None  # to make sure it's always declared
    # Setting cached features Filename
    cached_features_file = os.path.join(args.cached_features_dir, 'cached_{}_{}_{}_{}_{}{}{}'.format(
        args.model_type,
        data_type,
        args.ee_gen_method,
        train_setname_str if data_type=='train' else 'mnli' if data_type=='mnli' else dev_setname_str if data_type=='dev' else test_setname_str,
        str(max_train_examples) if data_type=='train' else str(args.max_mnli_eval_examples) if data_type=='mnli' else str(args.max_dev_examples) if data_type=='dev' else str(args.max_test_examples),
        ('_maxMNLI_%d'%(max_mix_MNLI_examples)) if (max_mix_MNLI_examples >= 0 and data_type=='train') else '',
        ('_w*%d'%args.weight_to_challenge_set),
    ))
    if not os.path.exists(args.cached_features_dir):
        os.mkdir(args.cached_features_dir)
    # Only regenerate once per split/weight per run (tracked in module-global
    # lists `already_overwritten_data` / `already_overwritten_weight`).
    overwrite = args.overwrite_cached_features and (data_type not in already_overwritten_data or args.weight_to_challenge_set not in already_overwritten_weight)
    if os.path.exists(cached_features_file) and not overwrite:
        my_logger("Loading %s features from cached file %s" % (data_type, cached_features_file))
        features = torch.load(cached_features_file)
        my_logger(f"Loaded {len(features)} features")
    else:
        my_logger("Creating %s features from dataset file at %s" % (data_type, cached_features_file))
        features = convert_examples_to_features(args, examples, args.max_seq_length, tokenizer, args.ext_embeddings_type, args.break_position, data_type)
        my_logger(f"Created {len(features)} features")
        my_logger("Saving %s features into cached file %s"%(data_type, cached_features_file))
        torch.save(features, cached_features_file)
        already_overwritten_data.append(data_type)
        already_overwritten_weight.append(args.weight_to_challenge_set)
    if args.model_type == 'bert':
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        all_ext_emb_ids = torch.tensor([f.ext_emb_ids for f in features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        all_example_weight_ids = torch.tensor([f.example_weight for f in features], dtype=torch.float)
        all_example_source_ids = torch.tensor([f.example_source for f in features], dtype=torch.long)
        data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_ext_emb_ids, all_example_weight_ids, all_example_source_ids)
    elif args.model_type == 'roberta':
        # all_segment_ids is excluded in RoBERTa
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_input_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_ext_emb_ids = torch.tensor([f.ext_emb_ids for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        all_example_weight_ids = torch.tensor([f.example_weight for f in features], dtype=torch.float)
        all_example_source_ids = torch.tensor([f.example_source for f in features], dtype=torch.long)
        data = TensorDataset(all_input_ids, all_input_mask, all_label_ids, all_ext_emb_ids, all_example_weight_ids, all_example_source_ids)
    if data_type == 'train':
        if not args.semi_random_train:
            train_sampler = RandomSampler(data)
        else:
            train_sampler = SequentialSampler(data)  # I changed it so I can locate my training examples before or after MNLI, while I shuffled them already
        dataloader = DataLoader(data, sampler=train_sampler, batch_size=args.train_batch_size)
    elif data_type in ['dev', 'test', 'mnli']:
        eval_sampler = SequentialSampler(data)
        dataloader = DataLoader(data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    return dataloader


def get_MNLI_dev_acc(data_dir, model) :
    """Evaluate `model` on the MNLI dev set under `data_dir`.

    Returns (accuracy, per-example (pred, prob) list, the eval examples,
    an accuracy-by-label summary string, a prediction-distribution string).
    Labels are the 3 NLI classes (contradiction/entailment/neutral).
    """
    my_logger('Loading dev examples from %s...'%data_dir)
    mnli_eval_examples = processor.get_dev_examples(data_dir)
    mnli_eval_examples = mnli_eval_examples[:args.max_mnli_eval_examples]
    my_logger('Number of NLI dev examples: %d'%len(mnli_eval_examples), 1)
    eval_dataloader = load_or_gen_features('mnli', mnli_eval_examples)
    model.eval()
    eval_loss, mnli_acc = 0, 0
    # dist_by_label is a flattened 3x3 confusion-style distribution.
    label_count, eval_accuracy_by_label, pred_dist, dist_by_label = np.zeros([3,]), np.zeros([3,]), np.zeros([3,]), np.zeros([9,])
    nb_eval_steps, nb_eval_examples = 0, 0
    results = []
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(device) for t in batch)
        with torch.no_grad():
            if args.model_type == 'bert':
                input_ids, input_mask, segment_ids, label_ids, ext_emb_ids, example_weight, example_source = batch
                logits = model(input_ids, segment_ids, input_mask, labels=None, ext_emb_ids=ext_emb_ids, fix_position=args.fix_position)
            elif args.model_type == 'roberta':
                input_ids, input_mask, label_ids, ext_emb_ids, example_weight, example_source = batch  # same like 'bert' but without 'segment_ids'
                outputs = model(input_ids, attention_mask=input_mask, labels=None, ext_emb_ids=ext_emb_ids, example_weight=example_weight, fix_position=args.fix_position)
                logits = outputs[0]
        logits = logits.detach().cpu().numpy()
        label_ids = label_ids.to('cpu').numpy()
        example_source = example_source.detach().cpu().numpy()
        pred_label = np.argmax(logits,1)
        probs = np.max(softmax(logits,1), 1)
        tmp_eval_accuracy = accuracy(logits, label_ids)
        tmp_eval_accuracy_by_label, tmp_dist_by_label, _ = accuracy_by_label(logits, label_ids, example_source)
        temp_pred_dist = pred_distribution(logits)
        label_count += [np.sum(label_ids==0), np.sum(label_ids==1), np.sum(label_ids==2)]
        results += list(zip(pred_label, probs))
        mnli_acc += tmp_eval_accuracy
        eval_accuracy_by_label += tmp_eval_accuracy_by_label
        dist_by_label += tmp_dist_by_label
        pred_dist += temp_pred_dist
        nb_eval_examples += input_ids.size(0)
        nb_eval_steps += 1
    mnli_acc = mnli_acc / nb_eval_examples
    eval_accuracy_by_label = eval_accuracy_by_label / label_count
    dist_by_label = dist_by_label / ([label_count[0]]*3 + [label_count[1]]*3 + [label_count[2]]*3)  # dividing by :[nb_label_0, nb_label_0, nb_label_0, nb_label_1, nb_label_1, nb_label_1, nb_label_2, nb_label_2 nb_label_2]
    pred_dist = 1.* pred_dist / nb_eval_examples
    # MNLI_results.append(mnli_acc)
    my_logger('MNLI dev Accuracy = %1.2f (Acc by label: Cont: %1.1f, Ent: %1.1f, Neut: %1.1f; \t\tPred Dist: %1.0f%%, %1.0f%%, %1.0f%%)\n'% (mnli_acc*100, eval_accuracy_by_label[0]*100, eval_accuracy_by_label[1]*100, eval_accuracy_by_label[2]*100,pred_dist[0]*100, pred_dist[1]*100, pred_dist[2]*100))
    mnli_eval_acc_by_label_str = "(Acc by label: Cont: %1.1f, Ent: %1.1f, Neut: %1.1f; \t\tPred Dist: %1.0f%%, %1.0f%%, %1.0f%%)"%(eval_accuracy_by_label[0]*100, eval_accuracy_by_label[1]*100, eval_accuracy_by_label[2]*100,pred_dist[0]*100, pred_dist[1]*100, pred_dist[2]*100)
    dist_by_label_str = "(MNLI Dist by label: **Cont**: Cont:%1.2f, Ent: %1.2f, Neut: %1.2f; \t**Ent**: Cont:%1.2f, Ent: %1.2f, Neut: %1.2f; \t**Neu**: Cont:%1.2f, Ent: %1.2f, Neut: %1.2f; )"%(dist_by_label[0]*100, dist_by_label[1]*100, dist_by_label[2]*100, dist_by_label[3]*100, dist_by_label[4]*100, dist_by_label[5]*100, dist_by_label[6]*100, dist_by_label[7]*100, dist_by_label[8]*100)
    return mnli_acc, results, mnli_eval_examples, mnli_eval_acc_by_label_str, dist_by_label_str


def eval_examples_batch(eval_examples, model, data_type=None):
    """Evaluate `model` on `eval_examples` (default split: 'dev').

    Returns (accuracy, mean eval loss, accuracy-by-label summary string,
    label-distribution summary string). Also tracks accuracy split by
    example source (challenge set vs MNLI).
    """
    my_logger('\nEvaluating Test Set(%d exapmles):'%(len(eval_examples)))
    data_type = data_type if data_type is not None else 'dev'
    eval_dataloader = load_or_gen_features(data_type, eval_examples)
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    label_count, eval_accuracy_by_label, pred_dist, dist_by_label, eval_accuracy_by_source, source_count = np.zeros([3,]), np.zeros([3,]), np.zeros([3,]), np.zeros([9,]), np.zeros([2,]), np.zeros([2,])
    # results = []
    ind = 0
    eval_loss = 0
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(device) for t in batch)
        with torch.no_grad():
            if args.model_type == 'bert':
                input_ids, input_mask, segment_ids, label_ids, ext_emb_ids, example_weight, example_source = batch
                # NOTE(review): two forward passes per batch (loss + logits) —
                # works but doubles eval cost.
                tmp_eval_loss = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids, ext_emb_ids=ext_emb_ids, example_weight=example_weight, fix_position=args.fix_position)
                logits = model(input_ids, segment_ids, input_mask, labels=None, ext_emb_ids=ext_emb_ids, fix_position=args.fix_position)
            elif args.model_type == 'roberta':
                input_ids, input_mask, label_ids, ext_emb_ids, example_weight, example_source = batch  # same like 'bert' but without 'segment_ids'
                tmp_outputs = model(input_ids, attention_mask=input_mask, labels=label_ids, ext_emb_ids=ext_emb_ids, example_weight=example_weight, fix_position=args.fix_position)  # output = tupple(loss)
                tmp_eval_loss = tmp_outputs[0]
                outputs = model(input_ids, attention_mask=input_mask, labels=None, ext_emb_ids=ext_emb_ids, example_weight=example_weight, fix_position=args.fix_position)  # output = tupple(logits)
                logits = outputs[0]
        tmp_eval_loss = tmp_eval_loss.mean()
        eval_loss += tmp_eval_loss.item()
        logits = logits.detach().cpu().numpy()
        label_ids = label_ids.to('cpu').numpy()
        pred_label = np.argmax(logits,1)
        probs = np.max(softmax(logits,1), 1)*100
        example_source = example_source.detach().cpu().numpy()
        tmp_eval_accuracy = accuracy(logits, label_ids)
        tmp_eval_accuracy_by_label, tmp_dist_by_label, tmp_eval_accuracy_by_source = accuracy_by_label(logits, label_ids, example_source)
        temp_pred_dist = pred_distribution(logits)
        label_count += [np.sum(label_ids==0), np.sum(label_ids==1), np.sum(label_ids==2)]
        source_count += [np.sum(example_source==0), np.sum(example_source==1)]
        # results += list(zip(pred_label, probs))
        eval_accuracy += tmp_eval_accuracy
        eval_accuracy_by_label += tmp_eval_accuracy_by_label
        eval_accuracy_by_source += tmp_eval_accuracy_by_source
        dist_by_label += tmp_dist_by_label
        pred_dist += temp_pred_dist
        nb_eval_examples += input_ids.size(0)
        nb_eval_steps += 1
        ind += input_ids.shape[0]
    eval_accuracy = eval_accuracy / nb_eval_examples
    eval_accuracy_by_label = eval_accuracy_by_label / label_count
    eval_accuracy_by_source = eval_accuracy_by_source / source_count
    dist_by_label = dist_by_label / ([label_count[0]]*3 + [label_count[1]]*3 + [label_count[2]]*3)  # dividing by :[nb_label_0, nb_label_0, nb_label_0, nb_label_1, nb_label_1, nb_label_1, nb_label_2, nb_label_2 nb_label_2]
    pred_dist = 1.* pred_dist / nb_eval_examples
    tot_eval_loss = eval_loss / nb_eval_steps
    eval_acc_by_label_str = "(Acc by label: Cont: %1.2f, Ent: %1.2f, Neut: %1.2f; \t\tPred Dist: %1.2f%%, %1.2f%%, %1.2f%% \tChallenge-exmp Acc: %1.2f%%, MNLI-exmp Acc:%1.2f%%)"%(eval_accuracy_by_label[0]*100, eval_accuracy_by_label[1]*100, eval_accuracy_by_label[2]*100,pred_dist[0]*100, pred_dist[1]*100, pred_dist[2]*100, eval_accuracy_by_source[0], eval_accuracy_by_source[1])
    dist_by_label_str = "(Dist by label: **Cont**: Cont:%1.2f, Ent: %1.2f, Neut: %1.2f; \t**Ent**: Cont:%1.2f, Ent: %1.2f, Neut: %1.2f; \t**Neu**: Cont:%1.2f, Ent: %1.2f, Neut: %1.2f; )"%(dist_by_label[0]*100, dist_by_label[1]*100, dist_by_label[2]*100, dist_by_label[3]*100, dist_by_label[4]*100, dist_by_label[5]*100, dist_by_label[6]*100, dist_by_label[7]*100, dist_by_label[8]*100)
    return eval_accuracy, tot_eval_loss, eval_acc_by_label_str, dist_by_label_str


def save_train_templates(train_templates):
    """Write a human-readable training summary and pickle the templates.

    Saves 'training_info.txt' (set name, timestamp, command line, template
    dump) and 'train_templates.pkl' into args.taught_model_dir.
    """
    info_filename = os.path.join(args.taught_model_dir, 'training_info.txt')
    with open(info_filename, 'w') as f:
        f.write('Trained on: %s \n\n\n' % train_setname_str)
        f.write('Saved on ' + str(datetime.datetime.now()) +'\n')
        # Record the exact command line used for this run.
        f.write(' '.join(sys.argv) + '\n\n')
        for line in template2str(train_templates):
            f.write(str(line) + '\n')
    pkl_filename = os.path.join(args.taught_model_dir, 'train_templates.pkl')
    with open(pkl_filename , 'wb') as handle:
        pickle.dump(train_templates, handle)


def get_item2class():
    # NOTE(review): source chunk is truncated inside this literal; the rest of
    # the mapping (and the function body) is not visible here.
    classes = { 'fruits': ['berries', 'apples', 'bananas', 'oranges', 'lemons', 'peaches', 'grapes', 'pineapples', 'pears', 'watermelons'],
f"'{export_format_id}' is not a supported export format for this model. " f"Choose one of the following: {self.supported_export_formats}" ) content_types = gca_model_compat.Model.ExportFormat.ExportableContent supported_content_types = self.supported_export_formats[export_format_id] if ( artifact_destination and content_types.ARTIFACT not in supported_content_types ): raise ValueError( "This model can not be exported as an artifact in '{export_format_id}' format. " "Try exporting as a container image by passing the `image_destination` argument." ) if image_destination and content_types.IMAGE not in supported_content_types: raise ValueError( "This model can not be exported as a container image in '{export_format_id}' format. " "Try exporting the model artifacts by passing a `artifact_destination` argument." ) # Construct request payload output_config = gca_model_service_compat.ExportModelRequest.OutputConfig( export_format_id=export_format_id ) if artifact_destination: output_config.artifact_destination = gca_io_compat.GcsDestination( output_uri_prefix=artifact_destination ) if image_destination: output_config.image_destination = gca_io_compat.ContainerRegistryDestination( output_uri=image_destination ) _LOGGER.log_action_start_against_resource("Exporting", "model", self) operation_future = self.api_client.export_model( name=self.resource_name, output_config=output_config ) _LOGGER.log_action_started_against_resource_with_lro( "Export", "model", self.__class__, operation_future ) # Block before returning self._wait_on_export(operation_future=operation_future, sync=sync) _LOGGER.log_action_completed_against_resource("model", "exported", self) return json_format.MessageToDict(operation_future.metadata.output_info._pb) @classmethod @base.optional_sync() def upload_xgboost_model_file( cls, model_file_path: str, xgboost_version: str = "1.4", display_name: str = "XGBoost model", description: Optional[str] = None, instance_schema_uri: Optional[str] = None, 
parameters_schema_uri: Optional[str] = None, prediction_schema_uri: Optional[str] = None, explanation_metadata: Optional[explain.ExplanationMetadata] = None, explanation_parameters: Optional[explain.ExplanationParameters] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, sync=True, ) -> "Model": """Uploads a model and returns a Model representing the uploaded Model resource. Note: This function is *experimental* and can be changed in the future. Example usage:: my_model = Model.upload_xgboost_model_file( model_file_path="iris.xgboost_model.bst" ) Args: model_file_path (str): Required. Local file path of the model. xgboost_version (str): Optional. The version of the XGBoost serving container. Supported versions: ["0.82", "0.90", "1.1", "1.2", "1.3", "1.4"]. If the version is not specified, the latest version is used. display_name (str): Optional. The display name of the Model. The name can be up to 128 characters long and can be consist of any UTF-8 characters. description (str): The description of the model. instance_schema_uri (str): Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in ``PredictRequest.instances``, ``ExplainRequest.instances`` and ``BatchPredictionJob.input_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. parameters_schema_uri (str): Optional. 
Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via ``PredictRequest.parameters``, ``ExplainRequest.parameters`` and ``BatchPredictionJob.model_parameters``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform, if no parameters are supported it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. prediction_schema_uri (str): Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via ``PredictResponse.predictions``, ``ExplainResponse.explanations``, and ``BatchPredictionJob.output_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. explanation_metadata (explain.ExplanationMetadata): Optional. Metadata describing the Model's input and output for explanation. Both `explanation_metadata` and `explanation_parameters` must be passed together when used. For more details, see `Ref docs <http://tinyurl.com/1igh60kt>` explanation_parameters (explain.ExplanationParameters): Optional. Parameters to configure explaining for Model's predictions. For more details, see `Ref docs <http://tinyurl.com/1an4zake>` project: Optional[str]=None, Project to upload this model to. Overrides project set in aiplatform.init. location: Optional[str]=None, Location to upload this model to. 
Overrides location set in aiplatform.init. credentials: Optional[auth_credentials.Credentials]=None, Custom credentials to use to upload this model. Overrides credentials set in aiplatform.init. labels (Dict[str, str]): Optional. The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. encryption_spec_key_name (Optional[str]): Optional. The Cloud KMS resource identifier of the customer managed encryption key used to protect the model. Has the form: ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be in the same region as where the compute resource is created. If set, this Model and all sub-resources of this Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. staging_bucket (str): Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. Returns: model: Instantiated representation of the uploaded model resource. Raises: ValueError: If only `explanation_metadata` or `explanation_parameters` is specified. Also if model directory does not contain a supported model file. 
""" XGBOOST_SUPPORTED_MODEL_FILE_EXTENSIONS = [ ".pkl", ".joblib", ".bst", ] container_image_uri = aiplatform.helpers.get_prebuilt_prediction_container_uri( region=location, framework="xgboost", framework_version=xgboost_version, accelerator="cpu", ) model_file_path_obj = pathlib.Path(model_file_path) if not model_file_path_obj.is_file(): raise ValueError( f"model_file_path path must point to a file: '{model_file_path}'" ) model_file_extension = model_file_path_obj.suffix if model_file_extension not in XGBOOST_SUPPORTED_MODEL_FILE_EXTENSIONS: _LOGGER.warning( f"Only the following XGBoost model file extensions are currently supported: '{XGBOOST_SUPPORTED_MODEL_FILE_EXTENSIONS}'" ) _LOGGER.warning( "Treating the model file as a binary serialized XGBoost Booster." ) model_file_extension = ".bst" # Preparing model directory # We cannot clean up the directory immediately after calling Model.upload since # that call may be asynchronous and return before the model file has been read. # To work around this, we make this method asynchronous (decorate with @base.optional_sync) # but call Model.upload with sync=True. 
with tempfile.TemporaryDirectory() as prepared_model_dir: prepared_model_file_path = pathlib.Path(prepared_model_dir) / ( "model" + model_file_extension ) shutil.copy(model_file_path_obj, prepared_model_file_path) return cls.upload( serving_container_image_uri=container_image_uri, artifact_uri=prepared_model_dir, display_name=display_name, description=description, instance_schema_uri=instance_schema_uri, parameters_schema_uri=parameters_schema_uri, prediction_schema_uri=prediction_schema_uri, explanation_metadata=explanation_metadata, explanation_parameters=explanation_parameters, project=project, location=location, credentials=credentials, labels=labels, encryption_spec_key_name=encryption_spec_key_name, staging_bucket=staging_bucket, sync=True, ) @classmethod @base.optional_sync() def upload_scikit_learn_model_file( cls, model_file_path: str, sklearn_version: str = "1.0", display_name: str = "Scikit-learn model", description: Optional[str] = None, instance_schema_uri: Optional[str] = None, parameters_schema_uri: Optional[str] = None, prediction_schema_uri: Optional[str] = None, explanation_metadata: Optional[explain.ExplanationMetadata] = None, explanation_parameters: Optional[explain.ExplanationParameters] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, sync=True, ) -> "Model": """Uploads a model and returns a Model representing the uploaded Model resource. Note: This function is *experimental* and can be changed in the future. Example usage:: my_model = Model.upload_scikit_learn_model_file( model_file_path="iris.sklearn_model.joblib" ) Args: model_file_path (str): Required. Local file path of the model. sklearn_version (str): Optional. The version of the Scikit-learn serving container. Supported versions: ["0.20", "0.22", "0.23", "0.24", "1.0"]. 
If the version is not specified, the latest version is used. display_name (str): Optional. The display name of the Model. The name can be up to 128 characters long and can be consist of any UTF-8 characters. description (str): The description of the model. instance_schema_uri (str): Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in ``PredictRequest.instances``, ``ExplainRequest.instances`` and ``BatchPredictionJob.input_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. parameters_schema_uri (str): Optional. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via ``PredictRequest.parameters``, ``ExplainRequest.parameters`` and ``BatchPredictionJob.model_parameters``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this
#!/usr/bin/python # -- Content-Encoding: UTF-8 -- """ Herald HTTP transport discovery, based on a homemade multicast protocol :author: <NAME> :copyright: Copyright 2014, isandlaTech :license: Apache License 2.0 :version: 1.0.1 :status: Alpha .. Copyright 2014 isandlaTech Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Bundle version import herald.version __version__=herald.version.__version__ # ------------------------------------------------------------------------------ # Herald from . import ACCESS_ID, SERVICE_HTTP_TRANSPORT, SERVICE_HTTP_RECEIVER, \ FACTORY_DISCOVERY_MULTICAST, PROP_MULTICAST_GROUP, PROP_MULTICAST_PORT, \ PROP_DISCOVER_LOCAL_PEERS import herald import herald.beans as beans import herald.utils as utils import herald.transports.peer_contact as peer_contact # Pelix/iPOPO from pelix.ipopo.decorators import ComponentFactory, Requires, Validate, \ Invalidate, Property, RequiresBest from pelix.utilities import to_bytes, to_unicode # Standard library import logging import os import select import socket import struct import threading import time # ------------------------------------------------------------------------------ # Version of packet format # 3: used in Cohorte starting from version 1.2 PACKET_FORMAT_VERSION = 3 # Heart beat packet type PACKET_TYPE_HEARTBEAT = 1 # Last beat packet type PACKET_TYPE_LASTBEAT = 2 PROBE_CHANNEL_MULTICAST = "http_multicast" """ Name of the multicast discovery probe channel """ _logger = logging.getLogger(__name__) # 
------------------------------------------------------------------------------ if os.name == "nt": # Windows Specific code def pton(family, address): """ Calls inet_pton :param family: Socket family :param address: A string address :return: The binary form of the given address """ if family == socket.AF_INET: return socket.inet_aton(address) elif family == socket.AF_INET6: # Do it using WinSocks import ctypes winsock = ctypes.windll.ws2_32 # Prepare structure class sockaddr_in6(ctypes.Structure): """ Definition of the C structure sockaddr_in6 """ # pylint: disable=C0103 _fields_ = [("sin6_family", ctypes.c_short), ("sin6_port", ctypes.c_ushort), ("sin6_flowinfo", ctypes.c_ulong), ("sin6_addr", ctypes.c_ubyte * 16), ("sin6_scope_id", ctypes.c_ulong)] # Prepare pointers addr_ptr = ctypes.c_char_p(to_bytes(address)) out_address = sockaddr_in6() size = len(sockaddr_in6) size_ptr = ctypes.pointer(size) # Second call winsock.WSAStringToAddressA(addr_ptr, family, 0, out_address, size_ptr) # Convert the array... bin_addr = 0 for part in out_address.sin6_addr: bin_addr = bin_addr * 16 + part return bin_addr else: raise ValueError("Unhandled socket family: {0}".format(family)) else: # Other systems def pton(family, address): """ Calls inet_pton :param family: Socket family :param address: A string address :return: The binary form of the given address """ return socket.inet_pton(family, address) def make_mreq(family, address): """ Makes a mreq structure object for the given address and socket family. 
:param family: A socket family (AF_INET or AF_INET6) :param address: A multicast address (group) :raise ValueError: Invalid family or address """ if not address: raise ValueError("Empty address") # Convert the address to a binary form group_bin = pton(family, address) if family == socket.AF_INET: # IPv4 # struct ip_mreq # { # struct in_addr imr_multiaddr; /* IP multicast address of group */ # struct in_addr imr_interface; /* local IP address of interface */ # }; # "=I" : Native order, standard size unsigned int return group_bin + struct.pack("=I", socket.INADDR_ANY) elif family == socket.AF_INET6: # IPv6 # struct ipv6_mreq { # struct in6_addr ipv6mr_multiaddr; # unsigned int ipv6mr_interface; # }; # "@I" : Native order, native size unsigned int return group_bin + struct.pack("@I", 0) raise ValueError("Unknown family {0}".format(family)) def create_multicast_socket(address, port, join=True): """ Creates a multicast socket according to the given address and port. Handles both IPv4 and IPv6 addresses. :param address: Multicast address/group :param port: Socket port :param join: If False, the socket is not bound and does not join the multicast group (creates a simple UDP socket) :return: A tuple (socket, listening address) :raise ValueError: Invalid address or port """ # Get the information about a datagram (UDP) socket, of any family try: addrs_info = socket.getaddrinfo(address, port, socket.AF_UNSPEC, socket.SOCK_DGRAM) except socket.gaierror: raise ValueError("Error retrieving address information ({0}, {1})" .format(address, port)) if len(addrs_info) > 1: _logger.debug("More than one address information found. 
" "Using the first one.") # Get the first entry : (family, socktype, proto, canonname, sockaddr) addr_info = addrs_info[0] # Only accept IPv4/v6 addresses if addr_info[0] not in (socket.AF_INET, socket.AF_INET6): # Unhandled address family raise ValueError("Unhandled socket family : %d" % (addr_info[0])) # Prepare the socket sock = socket.socket(addr_info[0], socket.SOCK_DGRAM) if join: # Reuse address sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: # Special case for MacOS # pylint: disable=no-member sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) except AttributeError: pass # Bind the socket if sock.family == socket.AF_INET: # IPv4 binding sock.bind(('0.0.0.0', port)) else: # IPv6 Binding sock.bind(('::', port)) # Prepare the mreq structure to join the group # addrinfo[4] = (addr,port) mreq = make_mreq(sock.family, addr_info[4][0]) # Join the group if sock.family == socket.AF_INET: # IPv4 sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) # Allow multicast packets to get back on this host sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1) else: # IPv6 sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq) # Allow multicast packets to get back on this host sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1) return sock, addr_info[4][0] def close_multicast_socket(sock, address): """ Cleans up the given multicast socket. Unregisters it of the multicast group. 
    Parameters should be the result of create_multicast_socket

    :param sock: A multicast socket
    :param address: The multicast address used by the socket
    """
    if sock is None:
        # Nothing to clean up
        return

    if address:
        # Prepare the mreq structure to join the group
        mreq = make_mreq(sock.family, address)

        # Quit group
        if sock.family == socket.AF_INET:
            # IPv4
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, mreq)
        elif sock.family == socket.AF_INET6:
            # IPv6
            sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_LEAVE_GROUP, mreq)

    # Close the socket
    sock.close()

# ------------------------------------------------------------------------------


def make_heartbeat(port, path, peer_uid, node_uid, app_id):
    """
    Prepares the heart beat UDP packet

    Format : Little endian
    * Kind of beat (1 byte)
    * Herald HTTP server port (2 bytes)
    * Herald HTTP servlet path length (2 bytes)
    * Herald HTTP servlet path (variable, UTF-8)
    * Peer UID length (2 bytes)
    * Peer UID (variable, UTF-8)
    * Node UID length (2 bytes)
    * Node UID (variable, UTF-8)
    * Application ID length (2 bytes)
    * Application ID (variable, UTF-8)

    :param port: The port to access the Herald HTTP server
    :param path: The path to the Herald HTTP servlet
    :param peer_uid: The UID of the peer
    :param node_uid: The UID of the node
    :param app_id: Application ID
    :return: The heart beat packet content (byte array)
    """
    # Type and port...
    packet = struct.pack("<BBH", PACKET_FORMAT_VERSION, PACKET_TYPE_HEARTBEAT,
                         port)
    for string in (path, peer_uid, node_uid, app_id):
        # Strings...
        # Each string is encoded to bytes and length-prefixed (2 bytes, LE)
        string_bytes = to_bytes(string)
        packet += struct.pack("<H", len(string_bytes))
        packet += string_bytes
    return packet


def make_lastbeat(peer_uid, app_id):
    """
    Prepares the last beat UDP packet (when the peer is going away)

    Format : Little endian
    * Kind of beat (1 byte)
    * Peer UID length (2 bytes)
    * Peer UID (variable, UTF-8)
    * Application ID length (2 bytes)
    * Application ID (variable, UTF-8)

    :param peer_uid: Peer UID
    :param app_id: Application ID
    :return: The last beat packet content (byte array)
    """
    packet = struct.pack("<BB", PACKET_FORMAT_VERSION, PACKET_TYPE_LASTBEAT)
    for string in (peer_uid, app_id):
        # Same length-prefixed encoding as make_heartbeat()
        string_bytes = to_bytes(string)
        packet += struct.pack("<H", len(string_bytes))
        packet += string_bytes
    return packet

# ------------------------------------------------------------------------------


class MulticastReceiver(object):
    """
    A multicast datagram receiver
    """
    def __init__(self, group, port, callback):
        """
        Sets up the receiver

        The given callback must have the following signature:
        ``callback(host, port, path, peer_uid)``.

        :param group: Multicast group to listen
        :param port: Multicast port
        :param callback: Method to call back once a packet is received
        """
        # Parameters
        self._group = group
        self._port = port
        self._callback = callback

        # Reception loop
        self._stop_event = threading.Event()
        self._thread = None

        # Socket
        self._socket = None

    def start(self):
        """
        Starts listening to the socket in a background thread.

        :return: None (a previous version of this docstring claimed ``True``
                 was returned if the socket was created; no value is
                 actually returned — socket errors propagate as exceptions)
        """
        # Create the multicast socket (update the group)
        self._socket, self._group = create_multicast_socket(self._group,
                                                            self._port)

        # Start the listening thread
        # NOTE(review): the thread is not a daemon — stop() must be called
        # for the process to exit cleanly; TODO confirm intended
        self._stop_event.clear()
        self._thread = threading.Thread(
            target=self.__read,
            name="MulticastReceiver-{0}".format(self._port))
        self._thread.start()

    def stop(self):
        """
        Stops listening to the socket
        """
        # Stop the loop
        self._stop_event.set()

        # Join the thread
        self._thread.join()
        self._thread = None

        # Close the socket (and leave the multicast group)
        close_multicast_socket(self._socket, self._group)

    def _handle_heartbeat(self, sender, data):
        """
        Handles a raw heart beat

        :param sender: Sender (address, port) tuple
        :param data: Raw packet data
        """
        # Format of packet (first byte, little endian)
        parsed, data = self._unpack("<B", data)
        # (shadows the builtin 'format'; kept as-is)
        format = parsed[0]
        if format == PACKET_FORMAT_VERSION:
            # Kind of beat
            parsed, data = self._unpack("<B", data)
            kind = parsed[0]
            if kind == PACKET_TYPE_HEARTBEAT:
                #
= [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2175})

# ------------------------------------------------------------------
# NOTE(review): this section appears to be auto-generated UFO model
# output (Vertex objects with particles/color/lorentz/couplings) —
# presumably produced by a model-export tool; do not edit by hand,
# regenerate from the model source instead. TODO confirm generator.
# ------------------------------------------------------------------

V_827 = Vertex(name = 'V_827',
               particles = [ P.sd1__tilde__, P.sd1, P.su1__tilde__, P.su1 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(1,0):C.GC_594,(0,0):C.GC_592,(2,0):C.GC_593})

V_828 = Vertex(name = 'V_828',
               particles = [ P.sd2__tilde__, P.sd2, P.su1__tilde__, P.su1 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_595,(1,0):C.GC_596})

V_829 = Vertex(name = 'V_829',
               particles = [ P.sd3__tilde__, P.sd3, P.su1__tilde__, P.su1 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_597,(1,0):C.GC_598})

V_830 = Vertex(name = 'V_830',
               particles = [ P.sd4__tilde__, P.sd4, P.su1__tilde__, P.su1 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_573,(1,0):C.GC_599})

V_831 = Vertex(name = 'V_831',
               particles = [ P.sd5__tilde__, P.sd5, P.su1__tilde__, P.su1 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_574,(1,0):C.GC_600})

V_832 = Vertex(name = 'V_832',
               particles = [ P.sd6__tilde__, P.sd6, P.su1__tilde__, P.su1 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_575,(1,0):C.GC_601})

V_833 = Vertex(name = 'V_833',
               particles = [ P.su1__tilde__, P.su1__tilde__, P.su1, P.su1 ],
               color = [ 'Identity(1,3)*Identity(2,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,3,1)*T(-1,4,2)', 'T(-1,3,2)*T(-1,4,1)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(1,0):C.GC_605,(0,0):C.GC_605,(3,0):C.GC_606,(2,0):C.GC_606})

V_834 = Vertex(name = 'V_834',
               particles = [ P.n1, P.c, P.su2__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_635})

V_835 = Vertex(name = 'V_835',
               particles = [ P.n2, P.c, P.su2__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_636})

V_836 = Vertex(name = 'V_836',
               particles = [ P.n3, P.c, P.su2__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_637})

V_837 = Vertex(name = 'V_837',
               particles = [ P.n4, P.c, P.su2__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_638})

V_838 = Vertex(name = 'V_838',
               particles = [ P.a, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.VSS1, L.VSS3 ],
               couplings = {(0,0):C.GC_608,(0,1):C.GC_609})

V_839 = Vertex(name = 'V_839',
               particles = [ P.G__plus__, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_2119})

V_840 = Vertex(name = 'V_840',
               particles = [ P.H__plus__, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_2118})

V_841 = Vertex(name = 'V_841',
               particles = [ P.G__plus__, P.h02, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1165})

V_842 = Vertex(name = 'V_842',
               particles = [ P.h01, P.H__plus__, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1164})

V_843 = Vertex(name = 'V_843',
               particles = [ P.G0, P.G__plus__, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1103})

V_844 = Vertex(name = 'V_844',
               particles = [ P.A0, P.H__plus__, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1104})

V_845 = Vertex(name = 'V_845',
               particles = [ P.sd2, P.sl1__plus__, P.sv1, P.su2__tilde__ ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_628})

V_846 = Vertex(name = 'V_846',
               particles = [ P.sd2, P.sl2__plus__, P.sv2, P.su2__tilde__ ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_629})

V_847 = Vertex(name = 'V_847',
               particles = [ P.sd2, P.sl3__plus__, P.sv3, P.su2__tilde__ ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_630})

V_848 = Vertex(name = 'V_848',
               particles = [ P.G__plus__, P.h01, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2287})

V_849 = Vertex(name = 'V_849',
               particles = [ P.h02, P.H__plus__, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2287})

V_850 = Vertex(name = 'V_850',
               particles = [ P.A0, P.G__plus__, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2011})

V_851 = Vertex(name = 'V_851',
               particles = [ P.G0, P.H__plus__, P.sd2, P.su2__tilde__ ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2011})

V_852 = Vertex(name = 'V_852',
               particles = [ P.sd1__tilde__, P.sd2, P.su1, P.su2__tilde__ ],
               color = [ 'Identity(1,3)*Identity(2,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_623})

V_853 = Vertex(name = 'V_853',
               particles = [ P.c__tilde__, P.n1, P.su2 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_107})

V_854 = Vertex(name = 'V_854',
               particles = [ P.c__tilde__, P.n2, P.su2 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_130})

V_855 = Vertex(name = 'V_855',
               particles = [ P.c__tilde__, P.n3, P.su2 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_153})

V_856 = Vertex(name = 'V_856',
               particles = [ P.c__tilde__, P.n4, P.su2 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_176})

V_857 = Vertex(name = 'V_857',
               particles = [ P.s__tilde__, P.x1__minus__, P.su2 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_899})

V_858 = Vertex(name = 'V_858',
               particles = [ P.s__tilde__, P.x2__minus__, P.su2 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_915})

V_859 = Vertex(name = 'V_859',
               particles = [ P.G__minus__, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_2097})

V_860 = Vertex(name = 'V_860',
               particles = [ P.H__minus__, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_2096})

V_861 = Vertex(name = 'V_861',
               particles = [ P.H__minus__, P.h01, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1152})

V_862 = Vertex(name = 'V_862',
               particles = [ P.G__minus__, P.h02, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1153})

V_863 = Vertex(name = 'V_863',
               particles = [ P.G0, P.G__minus__, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1078})

V_864 = Vertex(name = 'V_864',
               particles = [ P.A0, P.H__minus__, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1077})

V_865 = Vertex(name = 'V_865',
               particles = [ P.sd2__tilde__, P.sl1__minus__, P.sv1__tilde__, P.su2 ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_515})

V_866 = Vertex(name = 'V_866',
               particles = [ P.sd2__tilde__, P.sl2__minus__, P.sv2__tilde__, P.su2 ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_531})

V_867 = Vertex(name = 'V_867',
               particles = [ P.sd2__tilde__, P.sl3__minus__, P.sv3__tilde__, P.su2 ],
               color = [ 'Identity(1,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_547})

V_868 = Vertex(name = 'V_868',
               particles = [ P.G__minus__, P.h01, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2281})

V_869 = Vertex(name = 'V_869',
               particles = [ P.H__minus__, P.h02, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2281})

V_870 = Vertex(name = 'V_870',
               particles = [ P.A0, P.G__minus__, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2000})

V_871 = Vertex(name = 'V_871',
               particles = [ P.G0, P.H__minus__, P.sd2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_2000})

V_872 = Vertex(name = 'V_872',
               particles = [ P.sd1, P.sd2__tilde__, P.su1__tilde__, P.su2 ],
               color = [ 'Identity(1,3)*Identity(2,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_571})

V_873 = Vertex(name = 'V_873',
               particles = [ P.sv1__tilde__, P.sv1, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_634})

V_874 = Vertex(name = 'V_874',
               particles = [ P.sv2__tilde__, P.sv2, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_634})

V_875 = Vertex(name = 'V_875',
               particles = [ P.sv3__tilde__, P.sv3, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_634})

V_876 = Vertex(name = 'V_876',
               particles = [ P.a, P.a, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.VVSS1 ],
               couplings = {(0,0):C.GC_610})

V_877 = Vertex(name = 'V_877',
               particles = [ P.h02, P.su2__tilde__, P.su2 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings
#!/usr/bin/env python3
# -*- coding: cp1252 -*-
'''
Created on 13.03.2018

GUI_forms.py

widgits for adding new sequences or new projects to TypeLoader

@author: <NAME>
'''

# import modules:

import sys, os, shutil, re
from PyQt5.QtSql import QSqlQuery
from PyQt5.QtWidgets import (QApplication, QFormLayout, QPushButton, QLineEdit,
                             QDialog, QMessageBox)
from PyQt5.Qt import pyqtSlot, pyqtSignal
from PyQt5.QtGui import QIcon

import general
from typeloader_core import EMBLfunctions as EF
from GUI_misc import settings_ok

import db_internal

# ===========================================================
# parameters:

# ===========================================================
# classes:


class NewProjectForm(QDialog):
    """a popup widget to create a new Typeloader Project

    Collects user/gene/pool (+ optional title/description), generates a
    unique project name, and submits the project to ENA.
    """
    # Signals observed by the main window (emitted elsewhere in this class)
    project_changed = pyqtSignal(str)
    refresh_projects = pyqtSignal()

    def __init__(self, log, mydb, settings, parent=None):
        """Set up the dialog, verify ENA settings, and show it.

        :param log: application logger
        :param mydb: open database handle
        :param settings: dict-like app settings (user_name, modus, ...)
        :param parent: optional parent widget
        """
        try:
            super().__init__(parent)
            self.settings = settings
            log.debug("Opening 'New Project' Dialog...")
            self.log = log
            self.mydb = mydb
            self.init_UI()
            self.setWindowTitle("Create a new project")
            self.setWindowIcon(QIcon(general.favicon))
            # In debugging mode the fields are pre-filled with test data
            if self.settings["modus"] == "debugging":
                self.fill_with_test_values()
            self.get_existing_projects()
            # State shared between the slots below
            self.user = ""
            self.gene = ""
            self.pool = ""
            self.project_name = None
            self.submission_file = None
            self.success = False
            self.invalid_fields = []
            self.output_file = None
            self.show()

            # Refuse to run without complete ENA settings
            ok, msg = settings_ok("ENA", self.settings, self.log)
            if not ok:
                QMessageBox.warning(self, "Missing ENA settings", msg)
                self.close()
        except Exception as E:
            QMessageBox.warning(self, "Problem with NewProjectForm",
                                "Could not open NewProjectForm:\n\n" + repr(E))
            log.error(E)
            log.exception(E)

    def init_UI(self):
        """establish the widgets
        """
        layout = QFormLayout()
        self.setLayout(layout)

        self.user_entry = QLineEdit(self.settings["user_name"], self)
        self.user_entry.setWhatsThis("Your name")
        layout.addRow("User:", self.user_entry)
        self.user_entry.setText(self.settings["user_name"])

        self.gene_entry = QLineEdit(self)
        self.gene_entry.setFocus()
        self.gene_entry.setFixedWidth(175)
        layout.addRow("Gene:", self.gene_entry)
        self.gene_entry.setWhatsThis("The gene analyzed in this project. Use 'mixed' for multiple genes.")

        self.pool_entry = QLineEdit(self)
        layout.addRow("Pool:", self.pool_entry)
        self.pool_entry.setWhatsThis("Name for the sample pool, required by ENA.")
        # NOTE(review): only pool_entry triggers check_ready_project();
        # edits to user/gene alone do not re-enable the button — TODO confirm intended
        self.pool_entry.textChanged.connect(self.check_ready_project)

        self.title_entry = QLineEdit(self)
        layout.addRow("Title:", self.title_entry)
        self.title_entry.setPlaceholderText("(optional)")
        self.title_entry.setWhatsThis(
            "An optional short project title. If you set none, a default text is generated for ENA.")

        self.desc_entry = QLineEdit(self)
        layout.addRow("Description:", self.desc_entry)
        self.desc_entry.setPlaceholderText("(optional)")
        self.desc_entry.setWhatsThis("An optional project description. Useful for later filtering.")

        self.project_btn = QPushButton("Click to generate", self)
        layout.addRow("Project Name:", self.project_btn)
        self.project_btn.setEnabled(False)
        self.project_btn.clicked.connect(self.on_projectBtn_clicked)
        self.project_btn.setWhatsThis(
            "Click here to generate the project name. Can only be clicked after all necessary fields above have been filled.")

        self.submit_btn = QPushButton("Start new project", self)
        layout.addRow(self.submit_btn)
        self.submit_btn.setEnabled(False)
        self.submit_btn.clicked.connect(self.on_submitBtn_clicked)
        self.submit_btn.setWhatsThis(
            "Click here to submit the project to ENA, receive a project accession number, and save the project. Can only be clicked after a project name has been generated.")

        self.acc_entry = QLineEdit(self)
        layout.addRow("ENA Project Nr.:", self.acc_entry)
        self.acc_entry.setWhatsThis("Project accession number received from ENA.")

        self.close_btn = QPushButton("Done", self)
        layout.addRow(self.close_btn)
        self.close_btn.clicked.connect(self.close_me)
        self.close_btn.setWhatsThis("Click to leave this dialog.")

    @pyqtSlot()
    def check_ready_project(self):
        """check whether all required fields have content;
        if yes, enable project_btn
        """
        self.user = self.user_entry.text()
        self.gene = self.gene_entry.text()
        self.pool = self.pool_entry.text()
        if self.user and self.gene and self.pool:
            self.project_btn.ready = True
            self.project_btn.setEnabled(True)
            self.project_btn.setStyleSheet(general.btn_style_ready)

    def get_existing_projects(self):
        """gets a list of all existing projects (stored in
        self.existing_projects; used for uniqueness checks)
        """
        self.log.debug("Getting all existing projects from database...")
        query = "select project_name from projects"
        # (sic: "projcts" typo below is a runtime string passed to the
        # query helper; kept byte-identical)
        success, data = db_internal.execute_query(query, 1, self.log,
                                                  "retrieving existing projcts", "Database error", self)
        self.existing_projects = []
        if success:
            if data:
                self.existing_projects = [project for (project,) in data]

    def get_values(self):
        """retrieves all values from the GUI; an empty title defaults
        to the pool name
        """
        self.log.debug("Getting all infos from the GUI...")
        self.check_ready_project()
        self.title = self.title_entry.text().strip()
        self.description = self.desc_entry.text().strip()
        if not self.title:
            self.title = self.pool
            self.title_entry.setText(self.pool)

    def check_all_fields_valid(self):
        """checks whether all fields contain only valid characters

        :return: an error message string ("" when everything is valid);
                 offending field names are collected in self.invalid_fields
        """
        self.log.debug("\tChecking whether all fields are ok...")
        self.invalid_fields = []
        allowed_characters = '^[a-zA-Z0-9-]+$'  # only alphanumeric characters or hyphens
        fields_to_test = [("gene", self.gene), ("pool", self.pool)]
        for (field, value) in fields_to_test:
            valid = re.match(allowed_characters, value)
            if not valid:
                self.invalid_fields.append(field)
                self.log.info("=> invalid character found in {}: {}!".format(field, value))
        secondary_fields = [("user name", self.user), ("title", self.title), ("description", self.description)]
        allowed_characters = '^[a-zA-Z0-9- ]+$'  # these may also contain spaces
        for (field, value) in secondary_fields:
            if value:
                valid = re.match(allowed_characters, value)
                if not valid:
                    self.invalid_fields.append(field)
                    self.log.info("\t=> invalid character found in {}: {}!".format(field, value))
        invalid_msg = ""
        if self.invalid_fields:
            invalid_fields = " and ".join(self.invalid_fields)
            invalid_msg = "Invalid character found in {}!\n".format(invalid_fields)
            invalid_msg += "Please don't use anything but letters, numbers or hyphens in your fields.\n"
            invalid_msg += "(Title, description and user name may also contain spaces.)"
        else:
            self.log.debug("\t=> everything ok")
        return invalid_msg

    @pyqtSlot()
    def on_projectBtn_clicked(self):
        """generates project_name out of given fields
        & displays it on itself

        Project name format: <date>_<initials>_<gene>_<pool>
        """
        self.log.debug("Generating project name...")
        self.check_ready_project()
        # Derive initials: configured short name for the default user,
        # otherwise first letter of each word of the entered name
        if self.user == self.settings["user_name"]:
            initials = self.settings["short_name"]
        else:
            initials = "".join(word[0].upper() for word in self.user.split())
        date = general.timestamp("%Y%m%d")
        self.get_values()
        invalid_msg = self.check_all_fields_valid()
        if invalid_msg:
            QMessageBox.warning(self, "Invalid character in {}".format(" and ".join(self.invalid_fields)),
                                invalid_msg)
            return False
        try:
            self.project_name = "_".join([date, initials, self.gene, self.pool])
            self.project_name = self.project_name.replace(" ", "-")
        except Exception as E:
            self.log.error(E)
            QMessageBox.warning(self, "Cannot create project name!",
                                "Cannot create a project name with the given parameters (see error below).\nPlease adjust them!\n\n{}".format(
                                    E))
            return
        self.log.debug("=> project name {} assigned".format(self.project_name))
        # Refuse duplicates (pool name drives uniqueness)
        if self.project_name in self.existing_projects:
            self.log.warning("Project '{}' already exists! Choose a different pool name!".format(self.project_name))
            QMessageBox.warning(self, "Project name not unique!",
                                """A project named '{}' already exists!\nPlease choose a different pool name.
                                """.format(self.project_name))
            return
        else:
            self.project_btn.setText(self.project_name)
            self.submit_btn.setEnabled(True)
            self.submit_btn.setStyleSheet(general.btn_style_ready)
            self.project_btn.setStyleSheet(general.btn_style_normal)

    @pyqtSlot()
    def on_submitBtn_clicked(self):
        """submits data to ENA & shows the accession-ID
        """
        try:  # for debugging
            self.log.debug("Submitting project to ENA...")
            ## create variables
            successful_transmit = "false"
            xml_center_name = self.settings["xml_center_name"]

            ## Creating XML files
            self.project_dir = os.path.join(self.settings["projects_dir"], self.project_name)
            self.log.debug("Creating {}".format(self.project_dir))
            try:
                os.makedirs(self.project_dir)
            # NOTE(review): WindowsError is undefined on non-Windows
            # platforms (NameError when an error occurs there); should
            # probably be OSError/FileExistsError — confirm targets
            except WindowsError:
                self.log.warning("'{}' already exists".format(self.project_dir))
            self.project_xml = EF.generate_project_xml(self.title, self.description, self.project_name, xml_center_name)
            self.project_filename = os.path.join(self.project_dir, self.project_name + ".xml")
            if os.path.exists(self.project_filename):
                info_exists = "File '{}' already exist. Please change pool name.".format(self.project_filename)
                QMessageBox.warning(self, "ALIAS already exists!", info_exists)
                self.log.warning("File " + self.project_filename + " already exist, use another pool name")
            else:
                success = EF.write_file(self.project_xml, self.project_filename, self.log)
                if not success:
                    msg = "Could not write to {}!".format(self.project_filename)
                    # NOTE(review): this constructs a QMessageBox but never
                    # shows it — likely meant QMessageBox.warning(...)
                    QMessageBox(self, "Error writing project xml file!", msg)
                else:
                    submission_alias = self.project_name + "_sub"
                    submission_project_xml = EF.generate_submission_project_xml(submission_alias,
                                                                                xml_center_name,
                                                                                self.project_filename)
                    self.submission_file = os.path.join(self.project_dir, submission_alias + ".xml")
                    success = EF.write_file(submission_project_xml, self.submission_file, self.log)
                    if not success:
                        msg = "Could not write to {}!".format(self.submission_file)
                        # NOTE(review): same constructed-but-never-shown
                        # QMessageBox as above
                        QMessageBox(self, "Error writing project submission xml file!", msg)
                    else:
                        ## Communicate with EMBL
                        self.log.info("Submitting new project to EMBL...")
                        server = self.settings["embl_submission"]
                        proxy = self.settings["proxy"]
                        self.output_file = os.path.join(self.project_dir, self.project_name + "_output.xml")
                        userpwd = "{}:{}".format(self.settings["ftp_user"], self.settings["ftp_pwd"])
                        study_err = EF.submit_project_ENA(self.submission_file, self.project_filename,
                                                          "PROJECT", server, proxy, self.output_file, userpwd)
                        if study_err:
                            self.log.exception(study_err)
                            QMessageBox.warning(self, "Error during ENA submission!",
                                                "Project submission to ENA did not work:\n\n{}!".format(study_err))
                        else:
                            self.log.info("=> Submission sent, awaiting response...")
                            successful_transmit, self.submission_ID, info_xml, error_xml, _ = EF.parse_register_EMBL_xml(
                                self.output_file, "SUBMISSION")
                            successful_transmit, self.accession_ID, info_xml, error_xml, _ = EF.parse_register_EMBL_xml(
                                self.output_file, "PROJECT")
                            # TODO: (future) cleanup: put all parsing into one function, add EXT_ID
                            if error_xml:
                                if info_xml == "known error":
                                    error_msg = error_xml
                                elif error_xml ==
"Internal Server Error": error_msg = "Internal Server Error.\nPlease check https://wwwdev.ebi.ac.uk/ena/submit/webin/login for details." elif isinstance(error_xml, str): error_msg = error_xml else: error_msg = "{}: {}".format(type(error_xml), str(error_xml)) self.log.error(error_xml) self.log.exception(error_xml) if "The object being added already exists in the submission account with accession" in error_xml: msg = "The project" + error_xml.split("The object")[1] msg += "\nPlease choose another pool name and try again!" QMessageBox.warning(self, "Project name already in use", msg) else: QMessageBox.warning(self, "Error during ENA submission!", "ENA response:\n\n{}".format(str(error_msg))) successful_transmit = False self.submit_btn.setEnabled(False) if successful_transmit == "true": self.log.debug("\t=> transmission to ENA successful") success = self.add_project_to_db() if success: self.acc_entry.setText(self.accession_ID) self.close_btn.setStyleSheet(general.btn_style_ready) self.submit_btn.setStyleSheet(general.btn_style_normal) self.success = True else: QMessageBox.warning(self, "Internal database problem", "Could not add project '{}' to the internal database!".format( self.project_name)) else: QMessageBox.warning(self, "ENA project submission failed", "ENA submission was not successful. Please try again!") self.log.warning("ENA project submission failed!") except Exception as E: self.log.error("Error in ENA project submission!") self.log.exception(E) if not self.success: self.log.info("Project creation was not successful. 
Removing all files from {}...".format(self.project_dir)) try: shutil.rmtree(self.project_dir) except Exception as E: self.log.debug("=> File deletion did not work:") self.log.error(E) self.log.exception(E) pass @pyqtSlot() def add_project_to_db(self): """adds all info about a project to the projects table """ self.log.debug("Adding new project to database...") mydate = general.timestamp("%d.%m.%Y") query = """INSERT INTO projects VALUES ('{}', 'Open', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}'); """.format(self.project_name, mydate, self.user, self.gene, self.pool, self.title, self.description, self.accession_ID, self.submission_ID) q = QSqlQuery() q.exec_(query) lasterr = q.lastError() if lasterr.isValid(): self.log.error(lasterr.text()) if lasterr.text().startswith("UNIQUE constraint failed:"): self.project_btn.setText("Such a project exists already!") self.project_btn.setStyleSheet(general.btn_style_clickme) self.submit_btn.setEnabled(False) self.accession_ID = "" self.acc_entry.setText(self.accession_ID) success = False else: self.log.debug("=> Added to database successfully") success = True return success def fill_with_test_values(self): """for debugging/development """ self.gene_entry.setText("HLA-B") self.pool_entry.setText("NEB1") @pyqtSlot() def close_me(self): """emits project-changed before closing the dialog """ if self.project_name: self.project_changed.emit(self.project_name) self.refresh_projects.emit() self.log.debug("'Project_changed' emitted") self.close() # =========================================================== # main: if __name__ == '__main__': from typeloader_GUI import create_connection, close_connection import GUI_login log = general.start_log(level="DEBUG") log.info("<Start {}>".format(os.path.basename(__file__))) settings_dic = GUI_login.get_settings("admin",