# NOTE(review): removed dataset-export artifact that preceded the license
# header (stray column labels: "gt", "stringclasses", "1 value", "context",
# "stringlengths", "2.49k", "119k") — not part of the source module.
# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import xml.sax
import hashlib
import string
import collections

from boto.connection import AWSQueryConnection
from boto.exception import BotoServerError
import boto.mws.exception
import boto.mws.response
from boto.handler import XmlHandler
from boto.compat import filter, map, six, encodebytes

__all__ = ['MWSConnection']

# Maps each MWS API section to (API version, seller-id argument name,
# request path). The second element is the keyword each call requires
# ('Merchant' for the legacy Feeds/Reports sections, 'SellerId' elsewhere).
api_version_path = {
    'Feeds': ('2009-01-01', 'Merchant', '/'),
    'Reports': ('2009-01-01', 'Merchant', '/'),
    'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'),
    'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'),
    'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'),
    'Inbound': ('2010-10-01', 'SellerId',
                '/FulfillmentInboundShipment/2010-10-01'),
    'Outbound': ('2010-10-01', 'SellerId',
                 '/FulfillmentOutboundShipment/2010-10-01'),
    'Inventory': ('2010-10-01', 'SellerId',
                  '/FulfillmentInventory/2010-10-01'),
    'Recommendations': ('2013-04-01', 'SellerId',
                        '/Recommendations/2013-04-01'),
    'CustomerInfo': ('2014-03-01', 'SellerId',
                     '/CustomerInformation/2014-03-01'),
    'CartInfo': ('2014-03-01', 'SellerId',
                 '/CartInformation/2014-03-01'),
    'Subscriptions': ('2013-07-01', 'SellerId',
                      '/Subscriptions/2013-07-01'),
    'OffAmazonPayments': ('2013-01-01', 'SellerId',
                          '/OffAmazonPayments/2013-01-01'),
}
# Base64-encoded MD5 digest used for the Content-MD5 request header and
# for verifying response bodies.
content_md5 = lambda c: encodebytes(hashlib.md5(c).digest()).strip()
# Attributes copied from a decorated API method onto each wrapper so the
# throttling/action metadata survives decorator stacking.
decorated_attrs = ('action', 'response', 'section',
                   'quota', 'restore', 'version')
# CamelCase MWS action name -> python method name, filled in by api_action.
api_call_map = {}


def add_attrs_from(func, to):
    """Copy the decorated_attrs metadata from *func* onto wrapper *to*."""
    for attr in decorated_attrs:
        setattr(to, attr, getattr(func, attr, None))
    to.__wrapped__ = func
    return to


def structured_lists(*fields):
    """Flatten list-valued kwargs into MWS's Key.Sub.N=value form.

    Each field is given as 'Key.Sub'; a kwarg ``Key=[a, b]`` becomes
    ``Key.Sub.1=a, Key.Sub.2=b``.
    """
    def decorator(func):
        def wrapper(self, *args, **kw):
            for key, acc in [f.split('.') for f in fields]:
                if key in kw:
                    # An empty sub-key (e.g. 'PaymentMethod.') omits the
                    # extra dot so the result is 'PaymentMethod.N'.
                    newkey = key + '.' + acc + (acc and '.' or '')
                    for i in range(len(kw[key])):
                        kw[newkey + str(i + 1)] = kw[key][i]
                    kw.pop(key)
            return func(self, *args, **kw)
        wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__,
                                                   ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator


def http_body(field):
    """Move kwarg *field* into the HTTP body, setting Content-Type/MD5."""
    def decorator(func):
        def wrapper(*args, **kw):
            if any([f not in kw for f in (field, 'content_type')]):
                message = "{0} requires {1} and content_type arguments for " \
                          "building HTTP body".format(func.action, field)
                raise KeyError(message)
            kw['body'] = kw.pop(field)
            kw['headers'] = {
                'Content-Type': kw.pop('content_type'),
                'Content-MD5': content_md5(kw['body']),
            }
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nRequired HTTP Body: " \
                          "{1}".format(func.__doc__, field)
        return add_attrs_from(func, to=wrapper)
    return decorator


def destructure_object(value, into, prefix, members=False):
    """Recursively flatten *value* into the *into* dict of query params.

    ResponseElements and mappings recurse per attribute/key; iterables
    become numbered entries (with '.member.' when members=True); strings,
    bools and scalars are stored directly under *prefix*.
    """
    if isinstance(value, boto.mws.response.ResponseElement):
        destructure_object(value.__dict__, into, prefix, members=members)
    # NOTE(review): collections.Mapping / collections.Iterable were removed
    # from the collections top level in Python 3.10 (use collections.abc).
    # Left unchanged here because this module still targets py2 via
    # boto.compat/six -- confirm before modernizing.
    elif isinstance(value, collections.Mapping):
        for name in value:
            # Skip private/bookkeeping attributes from ResponseElements.
            if name.startswith('_'):
                continue
            destructure_object(value[name], into, prefix + '.' + name,
                               members=members)
    elif isinstance(value, six.string_types):
        into[prefix] = value
    elif isinstance(value, collections.Iterable):
        for index, element in enumerate(value):
            suffix = (members and '.member.' or '.') + str(index + 1)
            destructure_object(element, into, prefix + suffix,
                               members=members)
    elif isinstance(value, bool):
        # MWS expects lowercase 'true'/'false'.
        into[prefix] = str(value).lower()
    else:
        into[prefix] = value


def structured_objects(*fields, **kwargs):
    """Destructure object-valued kwargs named in *fields* into flat params."""
    def decorator(func):
        def wrapper(*args, **kw):
            members = kwargs.get('members', False)
            for field in filter(lambda i: i in kw, fields):
                destructure_object(kw.pop(field), kw, field, members=members)
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \
                          "(ResponseElement or anything iterable/dict-like)" \
                          .format(func.__doc__, ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator


def requires(*groups):
    """Require exactly one of the argument groups to be fully present."""
    def decorator(func):
        def requires(*args, **kw):
            hasgroup = lambda group: all(key in kw for key in group)
            if 1 != len(list(filter(hasgroup, groups))):
                message = ' OR '.join(['+'.join(g) for g in groups])
                message = "{0} requires {1} argument(s)" \
                          "".format(func.action, message)
                raise KeyError(message)
            return func(*args, **kw)
        message = ' OR '.join(['+'.join(g) for g in groups])
        requires.__doc__ = "{0}\nRequired: {1}".format(func.__doc__,
                                                       message)
        return add_attrs_from(func, to=requires)
    return decorator


def exclusive(*groups):
    """Permit at most one of the argument groups to be present."""
    def decorator(func):
        def wrapper(*args, **kw):
            hasgroup = lambda group: all(key in kw for key in group)
            if len(list(filter(hasgroup, groups))) not in (0, 1):
                message = ' OR '.join(['+'.join(g) for g in groups])
                message = "{0} requires either {1}" \
                          "".format(func.action, message)
                raise KeyError(message)
            return func(*args, **kw)
        message = ' OR '.join(['+'.join(g) for g in groups])
        wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__, message)
        return add_attrs_from(func, to=wrapper)
    return decorator


def dependent(field, *groups):
    """If *field* is passed, require at least one argument group too."""
    def decorator(func):
        def wrapper(*args, **kw):
            hasgroup = lambda group: all(key in kw for key in group)
            if field in kw and not any(hasgroup(g) for g in groups):
                message = ' OR '.join(['+'.join(g) for g in groups])
                message = "{0} argument {1} requires {2}" \
                          "".format(func.action, field, message)
                raise KeyError(message)
            return func(*args, **kw)
        message = ' OR '.join(['+'.join(g) for g in groups])
        wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__,
                                                          field, message)
        return add_attrs_from(func, to=wrapper)
    return decorator


def requires_some_of(*fields):
    """Require at least one of *fields* to be present."""
    def decorator(func):
        def requires(*args, **kw):
            if not any(i in kw for i in fields):
                message = "{0} requires at least one of {1} argument(s)" \
                          "".format(func.action, ', '.join(fields))
                raise KeyError(message)
            return func(*args, **kw)
        requires.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__,
                                                            ', '.join(fields))
        return add_attrs_from(func, to=requires)
    return decorator


def boolean_arguments(*fields):
    """Convert True/False kwargs named in *fields* to 'true'/'false'."""
    def decorator(func):
        def wrapper(*args, **kw):
            for field in [f for f in fields if isinstance(kw.get(f), bool)]:
                kw[field] = str(kw[field]).lower()
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__,
                                                      ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator


def api_action(section, quota, restore, *api):
    """Bind a method to an MWS section/action with throttling metadata.

    Injects the seller-id keyword, Action and Version parameters, builds
    the request/response pair, and registers the action in api_call_map.
    """
    def decorator(func, quota=int(quota), restore=float(restore)):
        version, accesskey, path = api_version_path[section]
        # Explicit *api overrides the name derived from the method name.
        action = ''.join(api or map(str.capitalize, func.__name__.split('_')))

        def wrapper(self, *args, **kw):
            kw.setdefault(accesskey, getattr(self, accesskey, None))
            if kw[accesskey] is None:
                message = "{0} requires {1} argument. Set the " \
                          "MWSConnection.{2} attribute?" \
                          "".format(action, accesskey, accesskey)
                raise KeyError(message)
            kw['Action'] = action
            kw['Version'] = version
            response = self._response_factory(action, connection=self)
            request = dict(path=path, quota=quota, restore=restore)
            return func(self, request, response, *args, **kw)
        for attr in decorated_attrs:
            setattr(wrapper, attr, locals().get(attr))
        wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \
                          "{4}".format(action, version, quota, restore,
                                       func.__doc__)
        api_call_map[action] = func.__name__
        return wrapper
    return decorator


class MWSConnection(AWSQueryConnection):
    """Connection to Amazon Marketplace Web Services (MWS)."""

    ResponseFactory = boto.mws.response.ResponseFactory
    ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory

    def __init__(self, *args, **kw):
        kw.setdefault('host', 'mws.amazonservices.com')
        self._sandboxed = kw.pop('sandbox', False)
        # Merchant and SellerId are interchangeable; either seeds both.
        self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId')
        self.SellerId = kw.pop('SellerId', None) or self.Merchant
        kw = self._setup_factories(kw.pop('factory_scopes', []), **kw)
        super(MWSConnection, self).__init__(*args, **kw)

    def _setup_factories(self, extrascopes, **kw):
        # Install caller-supplied factories, or defaults scoped to the
        # boto.mws modules (plus any extra scopes the caller provided).
        for factory, (scope, Default) in {
            'response_factory':
                (boto.mws.response, self.ResponseFactory),
            'response_error_factory':
                (boto.mws.exception, self.ResponseErrorFactory),
        }.items():
            if factory in kw:
                setattr(self, '_' + factory, kw.pop(factory))
            else:
                scopes = extrascopes + [scope]
                setattr(self, '_' + factory, Default(scopes=scopes))
        return kw

    def _sandboxify(self, path):
        """Rewrite the API path to its _Sandbox variant when sandboxed."""
        if not self._sandboxed:
            return path
        splat = path.split('/')
        splat[-2] += '_Sandbox'
        return '/'.join(splat)

    def _required_auth_capability(self):
        return ['mws']

    def _post_request(self, request, params, parser, body='', headers=None):
        """Make a POST request, optionally with a content body,
           and return the response, optionally as raw text.
        """
        headers = headers or {}
        path = self._sandboxify(request['path'])
        request = self.build_base_http_request('POST', path, None, data=body,
                                               params=params, headers=headers,
                                               host=self.host)
        try:
            response = self._mexe(request, override_num_retries=None)
        except BotoServerError as bs:
            raise self._response_error_factory(bs.status, bs.reason, bs.body)
        body = response.read()
        boto.log.debug(body)
        if not body:
            boto.log.error('Null body %s' % body)
            raise self._response_error_factory(response.status,
                                               response.reason, body)
        if response.status != 200:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self._response_error_factory(response.status,
                                               response.reason, body)
        digest = response.getheader('Content-MD5')
        if digest is not None:
            # NOTE(review): assert is stripped under python -O, silently
            # skipping the integrity check -- consider raising instead.
            assert content_md5(body) == digest
        contenttype = response.getheader('Content-Type')
        return self._parse_response(parser, contenttype, body)

    def _parse_response(self, parser, contenttype, body):
        # Non-XML payloads (e.g. flat-file reports) are returned raw.
        if not contenttype.startswith('text/xml'):
            return body
        handler = XmlHandler(parser, self)
        xml.sax.parseString(body, handler)
        return parser

    def method_for(self, name):
        """Return the MWS API method referred to in the argument.
           The named method can be in CamelCase or underlined_lower_case.
           This is the complement to MWSConnection.any_call.action
        """
        action = '_' in name and string.capwords(name, '_') or name
        if action in api_call_map:
            return getattr(self, api_call_map[action])
        return None

    def iter_call(self, call, *args, **kw):
        """Pass a call name as the first argument and a generator
           is returned for the initial response and any continuation
           call responses made using the NextToken.
        """
        method = self.method_for(call)
        assert method, 'No call named "{0}"'.format(call)
        return self.iter_response(method(*args, **kw))

    def iter_response(self, response):
        """Pass a call's response as the initial argument and a
           generator is returned for the initial response and any
           continuation call responses made using the NextToken.
        """
        yield response
        more = self.method_for(response._action + 'ByNextToken')
        while more and response._result.HasNext == 'true':
            response = more(NextToken=response._result.NextToken)
            yield response

    @requires(['FeedType'])
    @boolean_arguments('PurgeAndReplace')
    @http_body('FeedContent')
    @structured_lists('MarketplaceIdList.Id')
    @api_action('Feeds', 15, 120)
    def submit_feed(self, request, response, headers=None, body='', **kw):
        """Uploads a feed for processing by Amazon MWS.
        """
        headers = headers or {}
        return self._post_request(request, kw, response, body=body,
                                  headers=headers)

    @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type',
                      'FeedProcessingStatusList.Status')
    @api_action('Feeds', 10, 45)
    def get_feed_submission_list(self, request, response, **kw):
        """Returns a list of all feed submissions submitted in the
           previous 90 days.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Feeds', 0, 0)
    def get_feed_submission_list_by_next_token(self, request, response, **kw):
        """Returns a list of feed submissions using the NextToken parameter.
        """
        return self._post_request(request, kw, response)

    @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status')
    @api_action('Feeds', 10, 45)
    def get_feed_submission_count(self, request, response, **kw):
        """Returns a count of the feeds submitted in the previous 90 days.
        """
        return self._post_request(request, kw, response)

    @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type')
    @api_action('Feeds', 10, 45)
    def cancel_feed_submissions(self, request, response, **kw):
        """Cancels one or more feed submissions and returns a
           count of the feed submissions that were canceled.
        """
        return self._post_request(request, kw, response)

    @requires(['FeedSubmissionId'])
    @api_action('Feeds', 15, 60)
    def get_feed_submission_result(self, request, response, **kw):
        """Returns the feed processing report.
        """
        return self._post_request(request, kw, response)

    def get_service_status(self, **kw):
        """Instruct the user on how to get service status.
        """
        sections = ', '.join(map(str.lower, api_version_path.keys()))
        message = "Use {0}.get_(section)_service_status(), " \
                  "where (section) is one of the following: " \
                  "{1}".format(self.__class__.__name__, sections)
        raise AttributeError(message)

    @requires(['ReportType'])
    @structured_lists('MarketplaceIdList.Id')
    @boolean_arguments('ReportOptions=ShowSalesChannel')
    @api_action('Reports', 15, 60)
    def request_report(self, request, response, **kw):
        """Creates a report request and submits the request to Amazon MWS.
        """
        return self._post_request(request, kw, response)

    @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type',
                      'ReportProcessingStatusList.Status')
    @api_action('Reports', 10, 45)
    def get_report_request_list(self, request, response, **kw):
        """Returns a list of report requests that you can use to get the
           ReportRequestId for a report.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Reports', 0, 0)
    def get_report_request_list_by_next_token(self, request, response, **kw):
        """Returns a list of report requests using the NextToken,
           which was supplied by a previous request to either
           GetReportRequestListByNextToken or GetReportRequestList, where
           the value of HasNext was true in that previous request.
        """
        return self._post_request(request, kw, response)

    @structured_lists('ReportTypeList.Type',
                      'ReportProcessingStatusList.Status')
    @api_action('Reports', 10, 45)
    def get_report_request_count(self, request, response, **kw):
        """Returns a count of report requests that have been submitted
           to Amazon MWS for processing.
        """
        return self._post_request(request, kw, response)

    @api_action('Reports', 10, 45)
    def cancel_report_requests(self, request, response, **kw):
        """Cancel one or more report requests, returning the count of the
           canceled report requests and the report request information.
        """
        return self._post_request(request, kw, response)

    @boolean_arguments('Acknowledged')
    @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type')
    @api_action('Reports', 10, 60)
    def get_report_list(self, request, response, **kw):
        """Returns a list of reports that were created in the previous
           90 days that match the query parameters.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Reports', 0, 0)
    def get_report_list_by_next_token(self, request, response, **kw):
        """Returns a list of reports using the NextToken, which
           was supplied by a previous request to either
           GetReportListByNextToken or GetReportList, where the
           value of HasNext was true in the previous call.
        """
        return self._post_request(request, kw, response)

    @boolean_arguments('Acknowledged')
    @structured_lists('ReportTypeList.Type')
    @api_action('Reports', 10, 45)
    def get_report_count(self, request, response, **kw):
        """Returns a count of the reports, created in the previous 90 days,
           with a status of _DONE_ and that are available for download.
        """
        return self._post_request(request, kw, response)

    @requires(['ReportId'])
    @api_action('Reports', 15, 60)
    def get_report(self, request, response, **kw):
        """Returns the contents of a report.
        """
        return self._post_request(request, kw, response)

    @requires(['ReportType', 'Schedule'])
    @api_action('Reports', 10, 45)
    def manage_report_schedule(self, request, response, **kw):
        """Creates, updates, or deletes a report request schedule for
           a specified report type.
        """
        return self._post_request(request, kw, response)

    @structured_lists('ReportTypeList.Type')
    @api_action('Reports', 10, 45)
    def get_report_schedule_list(self, request, response, **kw):
        """Returns a list of order report requests that are scheduled
           to be submitted to Amazon MWS for processing.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Reports', 0, 0)
    def get_report_schedule_list_by_next_token(self, request, response,
                                               **kw):
        """Returns a list of report requests using the NextToken,
           which was supplied by a previous request to either
           GetReportScheduleListByNextToken or GetReportScheduleList,
           where the value of HasNext was true in that previous request.
        """
        return self._post_request(request, kw, response)

    @structured_lists('ReportTypeList.Type')
    @api_action('Reports', 10, 45)
    def get_report_schedule_count(self, request, response, **kw):
        """Returns a count of order report requests that are scheduled
           to be submitted to Amazon MWS.
        """
        return self._post_request(request, kw, response)

    @requires(['ReportIdList'])
    @boolean_arguments('Acknowledged')
    @structured_lists('ReportIdList.Id')
    @api_action('Reports', 10, 45)
    def update_report_acknowledgements(self, request, response, **kw):
        """Updates the acknowledged status of one or more reports.
        """
        return self._post_request(request, kw, response)

    @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems'])
    @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems')
    @api_action('Inbound', 30, 0.5)
    def create_inbound_shipment_plan(self, request, response, **kw):
        """Returns the information required to create an inbound shipment.
        """
        return self._post_request(request, kw, response)

    @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems'])
    @structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
    @api_action('Inbound', 30, 0.5)
    def create_inbound_shipment(self, request, response, **kw):
        """Creates an inbound shipment.
        """
        return self._post_request(request, kw, response)

    @requires(['ShipmentId'])
    @structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
    @api_action('Inbound', 30, 0.5)
    def update_inbound_shipment(self, request, response, **kw):
        """Updates an existing inbound shipment.  Amazon documentation
           is ambiguous as to whether the InboundShipmentHeader and
           InboundShipmentItems arguments are required.
        """
        return self._post_request(request, kw, response)

    @requires_some_of('ShipmentIdList', 'ShipmentStatusList')
    @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status')
    @api_action('Inbound', 30, 0.5)
    def list_inbound_shipments(self, request, response, **kw):
        """Returns a list of inbound shipments based on criteria that
           you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Inbound', 30, 0.5)
    def list_inbound_shipments_by_next_token(self, request, response, **kw):
        """Returns the next page of inbound shipments using the NextToken
           parameter.
        """
        return self._post_request(request, kw, response)

    @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore'])
    @api_action('Inbound', 30, 0.5)
    def list_inbound_shipment_items(self, request, response, **kw):
        """Returns a list of items in a specified inbound shipment, or a
           list of items that were updated within a specified time frame.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Inbound', 30, 0.5)
    def list_inbound_shipment_items_by_next_token(self, request, response,
                                                  **kw):
        """Returns the next page of inbound shipment items using the
           NextToken parameter.
        """
        return self._post_request(request, kw, response)

    @api_action('Inbound', 2, 300, 'GetServiceStatus')
    def get_inbound_service_status(self, request, response, **kw):
        """Returns the operational status of the Fulfillment Inbound
           Shipment API section.
        """
        return self._post_request(request, kw, response)

    @requires(['SellerSkus'], ['QueryStartDateTime'])
    @structured_lists('SellerSkus.member')
    @api_action('Inventory', 30, 0.5)
    def list_inventory_supply(self, request, response, **kw):
        """Returns information about the availability of a seller's
           inventory.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Inventory', 30, 0.5)
    def list_inventory_supply_by_next_token(self, request, response, **kw):
        """Returns the next page of information about the availability
           of a seller's inventory using the NextToken parameter.
        """
        return self._post_request(request, kw, response)

    @api_action('Inventory', 2, 300, 'GetServiceStatus')
    def get_inventory_service_status(self, request, response, **kw):
        """Returns the operational status of the Fulfillment Inventory
           API section.
        """
        return self._post_request(request, kw, response)

    @requires(['PackageNumber'])
    @api_action('Outbound', 30, 0.5)
    def get_package_tracking_details(self, request, response, **kw):
        """Returns delivery tracking information for a package in an
           outbound shipment for a Multi-Channel Fulfillment order.
        """
        return self._post_request(request, kw, response)

    @requires(['Address', 'Items'])
    @structured_objects('Address', 'Items')
    @api_action('Outbound', 30, 0.5)
    def get_fulfillment_preview(self, request, response, **kw):
        """Returns a list of fulfillment order previews based on items
           and shipping speed categories that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId',
               'ShippingSpeedCategory', 'DisplayableOrderDateTime',
               'DestinationAddress', 'DisplayableOrderComment', 'Items'])
    @structured_objects('DestinationAddress', 'Items')
    @api_action('Outbound', 30, 0.5)
    def create_fulfillment_order(self, request, response, **kw):
        """Requests that Amazon ship items from the seller's inventory
           to a destination address.
        """
        return self._post_request(request, kw, response)

    @requires(['SellerFulfillmentOrderId'])
    @api_action('Outbound', 30, 0.5)
    def get_fulfillment_order(self, request, response, **kw):
        """Returns a fulfillment order based on a specified
           SellerFulfillmentOrderId.
        """
        return self._post_request(request, kw, response)

    @api_action('Outbound', 30, 0.5)
    def list_all_fulfillment_orders(self, request, response, **kw):
        """Returns a list of fulfillment orders fulfilled after (or
           at) a specified date or by fulfillment method.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Outbound', 30, 0.5)
    def list_all_fulfillment_orders_by_next_token(self, request, response,
                                                  **kw):
        """Returns the next page of inbound shipment items using the
           NextToken parameter.
        """
        return self._post_request(request, kw, response)

    @requires(['SellerFulfillmentOrderId'])
    @api_action('Outbound', 30, 0.5)
    def cancel_fulfillment_order(self, request, response, **kw):
        """Requests that Amazon stop attempting to fulfill an existing
           fulfillment order.
        """
        return self._post_request(request, kw, response)

    @api_action('Outbound', 2, 300, 'GetServiceStatus')
    def get_outbound_service_status(self, request, response, **kw):
        """Returns the operational status of the Fulfillment Outbound
           API section.
        """
        return self._post_request(request, kw, response)

    @requires(['CreatedAfter'], ['LastUpdatedAfter'])
    @requires(['MarketplaceId'])
    @exclusive(['CreatedAfter'], ['LastUpdatedAfter'])
    @dependent('CreatedBefore', ['CreatedAfter'])
    @exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId'])
    @dependent('LastUpdatedBefore', ['LastUpdatedAfter'])
    @exclusive(['CreatedAfter'], ['LastUpdatedBefore'])
    @structured_objects('OrderTotal', 'ShippingAddress',
                        'PaymentExecutionDetail')
    @structured_lists('MarketplaceId.Id', 'OrderStatus.Status',
                      'FulfillmentChannel.Channel', 'PaymentMethod.')
    @api_action('Orders', 6, 60)
    def list_orders(self, request, response, **kw):
        """Returns a list of orders created or updated during a time
           frame that you specify.
        """
        # BuyerEmail/SellerOrderId are mutually exclusive with each other
        # and with the filters below; enforce that here since the
        # decorators can't express it.
        toggle = set(('FulfillmentChannel.Channel.1',
                      'OrderStatus.Status.1', 'PaymentMethod.1',
                      'LastUpdatedAfter', 'LastUpdatedBefore'))
        for do, dont in {
            'BuyerEmail': toggle.union(['SellerOrderId']),
            'SellerOrderId': toggle.union(['BuyerEmail']),
        }.items():
            if do in kw and any(i in dont for i in kw):
                message = "Don't include {0} when specifying " \
                          "{1}".format(' or '.join(dont), do)
                raise AssertionError(message)
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Orders', 6, 60)
    def list_orders_by_next_token(self, request, response, **kw):
        """Returns the next page of orders using the NextToken value
           that was returned by your previous request to either
           ListOrders or ListOrdersByNextToken.
        """
        return self._post_request(request, kw, response)

    @requires(['AmazonOrderId'])
    @structured_lists('AmazonOrderId.Id')
    @api_action('Orders', 6, 60)
    def get_order(self, request, response, **kw):
        """Returns an order for each AmazonOrderId that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['AmazonOrderId'])
    @api_action('Orders', 30, 2)
    def list_order_items(self, request, response, **kw):
        """Returns order item information for an AmazonOrderId that
           you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Orders', 30, 2)
    def list_order_items_by_next_token(self, request, response, **kw):
        """Returns the next page of order items using the NextToken
           value that was returned by your previous request to either
           ListOrderItems or ListOrderItemsByNextToken.
        """
        return self._post_request(request, kw, response)

    @api_action('Orders', 2, 300, 'GetServiceStatus')
    def get_orders_service_status(self, request, response, **kw):
        """Returns the operational status of the Orders API section.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'Query'])
    @api_action('Products', 20, 20)
    def list_matching_products(self, request, response, **kw):
        """Returns a list of products and their attributes, ordered
           by relevancy, based on a search query that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'ASINList'])
    @structured_lists('ASINList.ASIN')
    @api_action('Products', 20, 20)
    def get_matching_product(self, request, response, **kw):
        """Returns a list of products and their attributes, based on
           a list of ASIN values that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'IdType', 'IdList'])
    @structured_lists('IdList.Id')
    @api_action('Products', 20, 20)
    def get_matching_product_for_id(self, request, response, **kw):
        """Returns a list of products and their attributes, based on
           a list of Product IDs that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'SellerSKUList'])
    @structured_lists('SellerSKUList.SellerSKU')
    @api_action('Products', 20, 10, 'GetCompetitivePricingForSKU')
    def get_competitive_pricing_for_sku(self, request, response, **kw):
        """Returns the current competitive pricing of a product, based
           on the SellerSKUs and MarketplaceId that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'ASINList'])
    @structured_lists('ASINList.ASIN')
    @api_action('Products', 20, 10, 'GetCompetitivePricingForASIN')
    def get_competitive_pricing_for_asin(self, request, response, **kw):
        """Returns the current competitive pricing of a product, based
           on the ASINs and MarketplaceId that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'SellerSKUList'])
    @structured_lists('SellerSKUList.SellerSKU')
    @api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU')
    def get_lowest_offer_listings_for_sku(self, request, response, **kw):
        """Returns the lowest price offer listings for a specific
           product by item condition and SellerSKUs.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'ASINList'])
    @structured_lists('ASINList.ASIN')
    @api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN')
    def get_lowest_offer_listings_for_asin(self, request, response, **kw):
        """Returns the lowest price offer listings for a specific
           product by item condition and ASINs.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'SellerSKU'])
    @api_action('Products', 20, 20, 'GetProductCategoriesForSKU')
    def get_product_categories_for_sku(self, request, response, **kw):
        """Returns the product categories that a SellerSKU belongs to.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'ASIN'])
    @api_action('Products', 20, 20, 'GetProductCategoriesForASIN')
    def get_product_categories_for_asin(self, request, response, **kw):
        """Returns the product categories that an ASIN belongs to.
        """
        return self._post_request(request, kw, response)

    @api_action('Products', 2, 300, 'GetServiceStatus')
    def get_products_service_status(self, request, response, **kw):
        """Returns the operational status of the Products API section.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'SellerSKUList'])
    @structured_lists('SellerSKUList.SellerSKU')
    @api_action('Products', 20, 10, 'GetMyPriceForSKU')
    def get_my_price_for_sku(self, request, response, **kw):
        """Returns pricing information for your own offer listings,
           based on SellerSKU.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'ASINList'])
    @structured_lists('ASINList.ASIN')
    @api_action('Products', 20, 10, 'GetMyPriceForASIN')
    def get_my_price_for_asin(self, request, response, **kw):
        """Returns pricing information for your own offer listings,
           based on ASIN.
        """
        return self._post_request(request, kw, response)

    @api_action('Sellers', 15, 60)
    def list_marketplace_participations(self, request, response, **kw):
        """Returns a list of marketplaces that the seller submitting
           the request can sell in, and a list of participations that
           include seller-specific information in that marketplace.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Sellers', 15, 60)
    def list_marketplace_participations_by_next_token(self, request,
                                                      response, **kw):
        """Returns the next page of marketplaces and participations
           using the NextToken value that was returned by your
           previous request to either ListMarketplaceParticipations
           or ListMarketplaceParticipationsByNextToken.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId'])
    @api_action('Recommendations', 5, 2)
    def get_last_updated_time_for_recommendations(self, request, response,
                                                  **kw):
        """Checks whether there are active recommendations for each
           category for the given marketplace, and if there are, returns
           the time when recommendations were last updated for each
           category.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId'])
    @structured_lists('CategoryQueryList.CategoryQuery')
    @api_action('Recommendations', 5, 2)
    def list_recommendations(self, request, response, **kw):
        """Returns your active recommendations for a specific category
           or for all categories for a specific marketplace.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('Recommendations', 5, 2)
    def list_recommendations_by_next_token(self, request, response, **kw):
        """Returns the next page of recommendations using the NextToken
           parameter.
        """
        return self._post_request(request, kw, response)

    @api_action('Recommendations', 2, 300, 'GetServiceStatus')
    def get_recommendations_service_status(self, request, response, **kw):
        """Returns the operational status of the Recommendations API
           section.
        """
        return self._post_request(request, kw, response)

    @api_action('CustomerInfo', 15, 12)
    def list_customers(self, request, response, **kw):
        """Returns a list of customer accounts based on search criteria
           that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('CustomerInfo', 50, 3)
    def list_customers_by_next_token(self, request, response, **kw):
        """Returns the next page of customers using the NextToken
           parameter.
        """
        return self._post_request(request, kw, response)

    @requires(['CustomerIdList'])
    @structured_lists('CustomerIdList.CustomerId')
    @api_action('CustomerInfo', 15, 12)
    def get_customers_for_customer_id(self, request, response, **kw):
        """Returns a list of customer accounts based on search criteria
           that you specify.
        """
        return self._post_request(request, kw, response)

    @api_action('CustomerInfo', 2, 300, 'GetServiceStatus')
    def get_customerinfo_service_status(self, request, response, **kw):
        """Returns the operational status of the Customer Information
           API section.
        """
        return self._post_request(request, kw, response)

    @requires(['DateRangeStart'])
    @api_action('CartInfo', 15, 12)
    def list_carts(self, request, response, **kw):
        """Returns a list of shopping carts in your Webstore that were
           last updated during the time range that you specify.
        """
        return self._post_request(request, kw, response)

    @requires(['NextToken'])
    @api_action('CartInfo', 50, 3)
    def list_carts_by_next_token(self, request, response, **kw):
        """Returns the next page of shopping carts using the NextToken
           parameter.
        """
        return self._post_request(request, kw, response)

    @requires(['CartIdList'])
    @structured_lists('CartIdList.CartId')
    @api_action('CartInfo', 15, 12)
    def get_carts(self, request, response, **kw):
        """Returns shopping carts based on the CartId values that you
           specify.
        """
        return self._post_request(request, kw, response)

    @api_action('CartInfo', 2, 300, 'GetServiceStatus')
    def get_cartinfo_service_status(self, request, response, **kw):
        """Returns the operational status of the Cart Information API
           section.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'Destination'])
    @structured_objects('Destination', members=True)
    @api_action('Subscriptions', 25, 0.5)
    def register_destination(self, request, response, **kw):
        """Specifies a new destination where you want to receive
           notifications.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'Destination'])
    @structured_objects('Destination', members=True)
    @api_action('Subscriptions', 25, 0.5)
    def deregister_destination(self, request, response, **kw):
        """Removes an existing destination from the list of registered
           destinations.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId'])
    @api_action('Subscriptions', 25, 0.5)
    def list_registered_destinations(self, request, response, **kw):
        """Lists all current destinations that you have registered.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'Destination'])
    @structured_objects('Destination', members=True)
    @api_action('Subscriptions', 25, 0.5)
    def send_test_notification_to_destination(self, request, response, **kw):
        """Sends a test notification to an existing destination.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'Subscription'])
    @structured_objects('Subscription', members=True)
    @api_action('Subscriptions', 25, 0.5)
    def create_subscription(self, request, response, **kw):
        """Creates a new subscription for the specified notification
           type and destination.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'NotificationType', 'Destination'])
    @structured_objects('Destination', members=True)
    @api_action('Subscriptions', 25, 0.5)
    def get_subscription(self, request, response, **kw):
        """Gets the subscription for the specified notification type
           and destination.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'NotificationType', 'Destination'])
    @structured_objects('Destination', members=True)
    @api_action('Subscriptions', 25, 0.5)
    def delete_subscription(self, request, response, **kw):
        """Deletes the subscription for the specified notification
           type and destination.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId'])
    @api_action('Subscriptions', 25, 0.5)
    def list_subscriptions(self, request, response, **kw):
        """Returns a list of all your current subscriptions.
        """
        return self._post_request(request, kw, response)

    @requires(['MarketplaceId', 'Subscription'])
    @structured_objects('Subscription', members=True)
    @api_action('Subscriptions', 25, 0.5)
    def update_subscription(self, request, response, **kw):
        """Updates the subscription for the specified notification
           type and destination.
        """
        return self._post_request(request, kw, response)

    @api_action('Subscriptions', 2, 300, 'GetServiceStatus')
    def get_subscriptions_service_status(self, request, response, **kw):
        """Returns the operational status of the Subscriptions API
           section.
        """
        # NOTE(review): the source chunk ends inside this method's
        # docstring; the body below follows the pattern used by every
        # other service-status call in this class -- confirm against the
        # full file.
        return self._post_request(request, kw, response)
""" return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes']) @structured_objects('OrderReferenceAttributes') @api_action('OffAmazonPayments', 10, 1) def set_order_reference_details(self, request, response, **kw): """Sets order reference details such as the order total and a description for the order. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId']) @api_action('OffAmazonPayments', 20, 2) def get_order_reference_details(self, request, response, **kw): """Returns details about the Order Reference object and its current state. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId']) @api_action('OffAmazonPayments', 10, 1) def confirm_order_reference(self, request, response, **kw): """Confirms that the order reference is free of constraints and all required information has been set on the order reference. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId']) @api_action('OffAmazonPayments', 10, 1) def cancel_order_reference(self, request, response, **kw): """Cancel an order reference; all authorizations associated with this order reference are also closed. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId']) @api_action('OffAmazonPayments', 10, 1) def close_order_reference(self, request, response, **kw): """Confirms that an order reference has been fulfilled (fully or partially) and that you do not expect to create any new authorizations on this order reference. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId', 'AuthorizationAmount']) @structured_objects('AuthorizationAmount') @api_action('OffAmazonPayments', 10, 1) def authorize(self, request, response, **kw): """Reserves a specified amount against the payment method(s) stored in the order reference. 
""" return self._post_request(request, kw, response) @requires(['AmazonAuthorizationId']) @api_action('OffAmazonPayments', 20, 2) def get_authorization_details(self, request, response, **kw): """Returns the status of a particular authorization and the total amount captured on the authorization. """ return self._post_request(request, kw, response) @requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount']) @structured_objects('CaptureAmount') @api_action('OffAmazonPayments', 10, 1) def capture(self, request, response, **kw): """Captures funds from an authorized payment instrument. """ return self._post_request(request, kw, response) @requires(['AmazonCaptureId']) @api_action('OffAmazonPayments', 20, 2) def get_capture_details(self, request, response, **kw): """Returns the status of a particular capture and the total amount refunded on the capture. """ return self._post_request(request, kw, response) @requires(['AmazonAuthorizationId']) @api_action('OffAmazonPayments', 10, 1) def close_authorization(self, request, response, **kw): """Closes an authorization. """ return self._post_request(request, kw, response) @requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount']) @structured_objects('RefundAmount') @api_action('OffAmazonPayments', 10, 1) def refund(self, request, response, **kw): """Refunds a previously captured amount. """ return self._post_request(request, kw, response) @requires(['AmazonRefundId']) @api_action('OffAmazonPayments', 20, 2) def get_refund_details(self, request, response, **kw): """Returns the status of a particular refund. """ return self._post_request(request, kw, response) @api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus') def get_offamazonpayments_service_status(self, request, response, **kw): """Returns the operational status of the Off-Amazon Payments API section. """ return self._post_request(request, kw, response)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Provides logic for determining results in a C++/Unity testapp.

The initial motivation for this was when sending testapps to Firebase Test
Lab. When using Game Loops, FTL will only report whether the app ran to
completion, i.e. didn't crash or timeout, but is unable to know whether the
internal test runners reported success or failure.

To validate this, the log output is parsed for the test summary generated by
each internal test runner. For C++ this was gTest, for Unity it was a custom
runner. In both cases, the logic works by searching for text indicating the
end of the internal tests, and then parsing the result summary.

In cases where the test summary does not exist (crash, or timeout) a tail of
the log is obtained instead, to help identify where the crash or timeout
occurred.

"""

import datetime
import os
import re
import json

from absl import logging
import attr

from print_matrix_configuration import PARAMETERS

# Platform identifiers accepted by validate_results().
UNITY = "unity"
CPP = "cpp"


def validate_results(log_text, platform):
  """Determines the results in the log output of a testapp.

  Args:
    log_text (str): Log output from a testapp. Can be None.
    platform (str): What type of testapp generated the log: 'unity' or 'cpp'.

  Returns:
    (TestResults): Structured results from the log.

  Raises:
    ValueError: If platform is neither 'unity' nor 'cpp'.

  """
  # Missing/empty log output counts as an incomplete (errored) run.
  if not log_text:
    return TestResults(complete=False)
  if platform == UNITY:
    return validate_results_unity(log_text)
  elif platform == CPP:
    return validate_results_cpp(log_text)
  else:
    raise ValueError("Invalid platform: %s." % platform)


def validate_results_unity(log_text):
  """Determines the results in the log output of a Unity testapp.

  Args:
    log_text (str): Log output from a Unity testapp.

  Returns:
    (TestResults): Structured results from the log.

  """
  # The result summary looks like 'PASS: 4, FAIL: 7'
  results_match = re.search(
      r"PASS: (?P<pass>[0-9]+), FAIL: (?P<fail>[0-9]+)", log_text)
  if results_match:
    summary = results_match.group(0)
    # We also want the list of failed test cases, which are listed after
    # the result summary and look like this:
    # TestInstanceIdChangeAfterReset (6/8)
    log_tail = log_text[results_match.start():]
    failed_cases = re.findall(r"Test\w+ \(\d+\/\d+\)", log_tail)
    if failed_cases:
      summary += "\n" + "\n".join(failed_cases)
    match_dict = results_match.groupdict()
    return TestResults(
        complete=True,
        passes=int(match_dict["pass"]),
        fails=int(match_dict["fail"]),
        summary=summary)
  else:
    # No summary found: the run presumably crashed or timed out. Keep a
    # tail of the log to help locate where it stopped.
    return TestResults(complete=False, summary=_tail(log_text, 15))


def validate_results_cpp(log_text):
  """Determines the results in the log output of a C++ testapp.

  Args:
    log_text (str): Log output from a C++ testapp.

  Returns:
    (TestResults): Structured results from the log.

  """
  # The gtest runner dumps a useful summary of tests after the tear down.
  end_marker = "Global test environment tear-down"
  complete = end_marker in log_text
  if complete:
    # rpartition splits a string into three components around the final
    # occurrence of the end marker, returning a triplet
    # (before, marker, after)
    result_summary = log_text.rpartition(end_marker)[2].lstrip()
    passes = re.search(r"\[ PASSED \] (?P<count>[0-9]+) test", result_summary)
    fails = re.search(r"\[ FAILED \] (?P<count>[0-9]+) test", result_summary)
    skips = re.search(r"\[ SKIPPED \] (?P<count>[0-9]+) test", result_summary)
  else:
    # Incomplete run (crash/timeout): keep a log tail for diagnostics.
    result_summary = _tail(log_text, 15)
    passes = None
    fails = None
    skips = None
  # A missing count (no regex match) is reported as 0.
  return TestResults(
      complete=complete,
      passes=0 if not passes else int(passes.group("count")),
      fails=0 if not fails else int(fails.group("count")),
      skips=0 if not skips else int(skips.group("count")),
      summary=result_summary)


def summarize_test_results(tests, platform, summary_dir,
                           file_name="summary.log"):
  """Summarizes and logs test results for multiple tests.

  Each 'test' should be an object with properties "testapp_path", which is
  a path to the binary, and "logs", which is a string containing the logs
  from that test.

  This will compute the results from each log's internal test summary. If
  the logs do not contain a test summary, that test's result will be
  "error".

  In addition to logging results, this will append a short summary to a
  file in summary_dir.

  Args:
    tests (Test): Objects containing str properties "testapp_path" and
      "logs".
    platform (str): What type of testapp generated the log: 'unity' or
      'cpp'.
    summary_dir (str): Directory in which to append the summary file.

  Returns:
    (int): Return code. 0 for all success, 1 for any failures or errors.

  """
  successes = []
  failures = []
  errors = []
  # Paths of testapps that succeeded at least once; used below to classify
  # a later failure/error of the same testapp as flakiness.
  success_testapp_paths = set()
  for test in tests:
    results = validate_results(test.logs, platform)
    test_result_pair = (test, results)
    if not results.complete:
      errors.append(test_result_pair)
    elif results.fails > 0:
      failures.append(test_result_pair)
    else:
      successes.append(test_result_pair)
      success_testapp_paths.add(test.testapp_path)

  # First log the successes, then the failures and errors, then the summary.
  # This way, debugging will involve checking the summary at the bottom,
  # then scrolling up for more information and seeing the failures first.
  #
  # For successes, just report the internal test summary, to reduce noise.
  # For failures, log the entire output for full diagnostic context.
  for test, results in successes:
    logging.info("%s:\n%s", test.testapp_path, results.summary)
  for test, _ in failures:
    logging.info("%s failed:\n%s", test.testapp_path, test.logs)
  for test, _ in errors:
    logging.info("%s didn't finish normally.\n%s",
                 test.testapp_path, test.logs)

  # Testapps that failed first, but succeed after retry. (max 3 retry)
  flaky_testapps = []
  failures_exclude_flakiness = []
  errors_exclude_flakiness = []

  # The summary is much more terse, to minimize the time it takes to
  # understand what went wrong, without necessarily providing full
  # debugging context.
  summary = []
  summary.append("TEST SUMMARY:")
  if successes:
    summary.append("%d TESTAPPS SUCCEEDED:" % len(successes))
    summary.extend((test.testapp_path for (test, _) in successes))
  if errors:
    summary.append("\n%d TESTAPPS EXPERIENCED ERRORS:" % len(errors))
    for test, results in errors:
      summary.append("\n%s:" % test.testapp_path)
      if test.testapp_path in success_testapp_paths:
        summary.append("THIS TESTAPP IS FLAKY")
        flaky_testapps.append((test, results))
      else:
        errors_exclude_flakiness.append((test, results))
      if hasattr(test, "ftl_link") and test.ftl_link:
        summary.append("ftl_link: %s" % test.ftl_link)
      if hasattr(test, "raw_result_link") and test.raw_result_link:
        summary.append("raw_result_link: %s" % test.raw_result_link)
      if results.summary:
        summary.append("log tail:")
        summary.append(results.summary)
      else:
        summary.append(
            "%s lacks logs (crashed, not found, etc)" % test.testapp_path)
  if failures:
    summary.append("\n%d TESTAPPS FAILED:" % len(failures))
    for test, results in failures:
      summary.append("\n%s:" % test.testapp_path)
      if test.testapp_path in success_testapp_paths:
        summary.append("THIS TESTAPP IS FLAKY")
        flaky_testapps.append((test, results))
      else:
        failures_exclude_flakiness.append((test, results))
      if hasattr(test, "ftl_link") and test.ftl_link:
        summary.append("ftl_link: %s" % test.ftl_link)
      if hasattr(test, "raw_result_link") and test.raw_result_link:
        summary.append("raw_result_link: %s" % test.raw_result_link)
      summary.append(results.summary)
  summary.append(
      "%d TESTAPPS TOTAL: %d PASSES, %d FAILURES, %d ERRORS"
      % (len(tests), len(successes), len(failures), len(errors)))

  if len(flaky_testapps) > 0 and len(flaky_testapps) == len(failures) + len(errors):
    logging.info("All failures and errors are due to flakiness.")
    summary.append("ALL THE FOLLOWING FAILURES AND ERRORS ARE DUE TO FLAKINESS:(")

  # summary_json format:
  #   { "type": "test",
  #     "testapps": [testapp],
  #     "errors": {testapp:{"logs": [error_log], "ftl_links": [ftl_link],
  #                         "raw_result_links": [raw_result_link]}},
  #     "failures": {testapp:{"logs": [error_log], "ftl_links": [ftl_link],
  #                           "raw_result_links": [raw_result_link],
  #                           "failed_tests": {failed_test: test_log}}},
  #     "flakiness": {testapp:{"logs": [error_log], "ftl_links": [ftl_link],
  #                            "raw_result_links": [raw_result_link],
  #                            "flaky_tests": {flaky_test: test_log}}}}
  summary_json = {}
  summary_json["type"] = "test"
  summary_json["testapps"] = [get_name(test.testapp_path) for test in tests]
  summary_json["errors"] = {
      get_name(test.testapp_path):
          {"logs": [], "ftl_links": [], "raw_result_links": []}
      for (test, _) in errors_exclude_flakiness}
  for (test, results) in errors_exclude_flakiness:
    testapp = get_name(test.testapp_path)
    summary_json["errors"][testapp]["logs"].append(results.summary)
    if hasattr(test, "ftl_link") and test.ftl_link:
      summary_json["errors"][testapp]["ftl_links"].append(test.ftl_link)
    if hasattr(test, "raw_result_link") and test.raw_result_link:
      summary_json["errors"][testapp]["raw_result_links"].append(test.raw_result_link)

  summary_json["failures"] = {
      get_name(test.testapp_path):
          {"logs": [], "ftl_links": [], "raw_result_links": [],
           "failed_tests": dict()}
      for (test, _) in failures_exclude_flakiness}
  for (test, results) in failures_exclude_flakiness:
    testapp = get_name(test.testapp_path)
    summary_json["failures"][testapp]["logs"].append(results.summary)
    if hasattr(test, "ftl_link") and test.ftl_link:
      summary_json["failures"][testapp]["ftl_links"].append(test.ftl_link)
    if hasattr(test, "raw_result_link") and test.raw_result_link:
      summary_json["failures"][testapp]["raw_result_links"].append(test.raw_result_link)
    # Individual failed test cases are announced as "Test <name> failed!"
    # in the runner's log summary.
    failed_tests = re.findall(r"Test (.+) failed!", results.summary)
    for failed_test in failed_tests:
      summary_json["failures"][testapp]["failed_tests"][failed_test] = "See workflow log"
      summary.append("\n%s FAILED\n" % (failed_test))

  summary_json["flakiness"] = {
      get_name(test.testapp_path):
          {"logs": [], "ftl_links": [], "raw_result_links": [],
           "flaky_tests": dict()}
      for (test, _) in flaky_testapps}
  for (test, results) in flaky_testapps:
    testapp = get_name(test.testapp_path)
    summary_json["flakiness"][testapp]["logs"].append(results.summary)
    if hasattr(test, "ftl_link") and test.ftl_link:
      summary_json["flakiness"][testapp]["ftl_links"].append(test.ftl_link)
    if hasattr(test, "raw_result_link") and test.raw_result_link:
      summary_json["flakiness"][testapp]["raw_result_links"].append(test.raw_result_link)
    flaky_tests = re.findall(r"Test (.+) failed!", results.summary)
    for flaky_test in flaky_tests:
      summary_json["flakiness"][testapp]["flaky_tests"][flaky_test] = "See workflow log"
      summary.append("\n%s FAILED\n" % (flaky_test))

  # NOTE(review): "a" mode appends a new JSON document on every run, so the
  # file is only valid JSON the first time -- confirm consumers expect this.
  with open(os.path.join(summary_dir, file_name+".json"), "a") as f:
    f.write(json.dumps(summary_json, indent=2))

  summary = "\n".join(summary)
  write_summary(summary_dir, summary, file_name)
  # success or only flakiness
  if len(tests) == len(successes) or len(flaky_testapps) == len(failures) + len(errors):
    return 0
  else:
    return 1


def write_summary(testapp_dir, summary, file_name="summary.log"):
  """Writes a summary of tests/builds to a file in the testapp directory.

  This will append the given summary to a file in the testapp directory,
  along with a timestamp. This summary's primary purpose is to aggregate
  results from separate steps on CI to be logged in a single summary step.

  Args:
    testapp_dir (str): Path to the directory of testapps being built or
      tested.
    summary (str): Short, readable multi-line summary of results.
    file_name (str): Name of the summary file to append to.

  """
  # This method serves as the source of truth on where to put the summary.
  os.makedirs(testapp_dir, exist_ok=True)
  with open(os.path.join(testapp_dir, file_name), "a") as f:
    # The timestamp mainly helps when running locally: if running multiple
    # tests on the same directory, the results will accumulate, with a
    # timestamp to help keep track of when a given test was run.
    timestamp = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
    f.write("\n%s\n%s\n" % (timestamp, summary))


def get_name(testapp_path):
  """Returns testapp api.

  Maps a testapp path back to the api name it was built from, by checking
  which configured api name appears as a substring of the path. Falls back
  to the raw path when no api matches.
  """
  testapps = PARAMETERS["integration_tests"]["config"]["apis"].split(",")
  for testapp in testapps:
    if testapp in testapp_path:
      return testapp
  return testapp_path


def _tail(text, n):
  """Returns the last n lines in text, or all of text if too few lines."""
  return "\n".join(text.splitlines()[-n:])


@attr.s(frozen=True, eq=False)
class TestResults(object):
  """Structured results parsed from a single testapp's log output."""
  complete = attr.ib()  # Did the testapp reach the end, or did it crash/timeout
  passes = attr.ib(default=0)
  fails = attr.ib(default=0)
  skips = attr.ib(default=0)
  summary = attr.ib(default="")  # Summary from internal runner OR tail of log
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Manages the state of what is installed in the cloud SDK.

This tracks the installed modules along with the files they created.  It also
provides functionality like extracting tar files into the installation and
tracking when we check for updates.
"""

# NOTE(review): this module uses Python 2-only syntax (three-expression
# `raise`, dict.iteritems()); it is not Python 3 compatible as written.

import compileall
import errno
import logging
import os
import shutil
import sys

from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.updater import installers
from googlecloudsdk.core.updater import snapshots
from googlecloudsdk.core.util import files as file_utils


class Error(exceptions.Error):
  """Base exception for the local_state module."""
  pass


class InvalidSDKRootError(Error):
  """Error for when the root of the Cloud SDK is invalid or cannot be found."""

  def __init__(self):
    # NOTE(review): "re-install the the SDK" is a typo in this user-facing
    # message; left untouched because this change only adds comments.
    super(InvalidSDKRootError, self).__init__(
        'The components management action could not be performed because the '
        'installation root of the Cloud SDK could not be located. '
        'If you previously used the Cloud SDK installer, '
        'you could re-install the the SDK and retry again.')


class InvalidDownloadError(Error):
  """Exception for when the SDK that was download was invalid."""

  def __init__(self):
    super(InvalidDownloadError, self).__init__(
        'The Cloud SDK download was invalid.')


class PermissionsError(Error):
  """Error for when a file operation cannot complete due to permissions."""

  def __init__(self, message, path):
    """Initialize a PermissionsError.

    Args:
      message: str, The message from the underlying error.
      path: str, The absolute path to a file or directory that needs to be
        operated on, but can't because of insufficient permissions.
    """
    super(PermissionsError, self).__init__(
        u'{message}: [{path}]\n\nEnsure you have the permissions to access the '
        u'file and that the file is not in use.'
        .format(message=message, path=path))


def _RaisesPermissionsError(func):
  """Use this decorator for functions that deal with files.

  If an exception indicating file permissions is raised, this decorator will
  raise a PermissionsError instead, so that the caller only has to watch for
  one type of exception.

  Args:
    func: The function to decorate.

  Returns:
    A decorator.
  """

  def _TryFunc(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except (OSError, IOError) as e:
      # Only translate permission failures; anything else propagates as-is.
      if e.errno == errno.EACCES:
        new_exc = PermissionsError(
            message=e.strerror, path=os.path.abspath(e.filename))
        # Maintain original stack trace (Python 2 three-expression raise).
        raise new_exc, None, sys.exc_info()[2]
      raise
    except shutil.Error as e:
      args = e.args[0][0]
      # unfortunately shutil.Error *only* has formatted strings to inspect.
      # Looking for this substring is looking for errno.EACCES, which has
      # a numeric value of 13.
      if args[2].startswith('[Errno 13]'):
        new_exc = PermissionsError(
            message=args[2], path=os.path.abspath(args[0]))
        # Maintain original stack trace.
        raise new_exc, None, sys.exc_info()[2]
      raise
  return _TryFunc


class InstallationState(object):
  """The main class for checking / updating local installation state."""

  # Directory (under the SDK root) holding all installation state.
  STATE_DIR_NAME = config.Paths.CLOUDSDK_STATE_DIR
  # Subdirectories of the state dir for the previous install and for
  # to-be-deleted files.
  BACKUP_DIR_NAME = '.backup'
  TRASH_DIR_NAME = '.trash'
  # Suffix appended to the SDK root path to form the staging area path.
  STAGING_ROOT_SUFFIX = '.staging'
  COMPONENT_SNAPSHOT_FILE_SUFFIX = '.snapshot.json'

  @staticmethod
  def ForCurrent():
    """Gets the installation state for the SDK that this code is running in.

    Returns:
      InstallationState, The state for this area.

    Raises:
      InvalidSDKRootError: If this code is not running under a valid SDK.
    """
    sdk_root = config.Paths().sdk_root
    if not sdk_root:
      raise InvalidSDKRootError()
    return InstallationState(os.path.realpath(sdk_root))

  def BackupInstallationState(self):
    """Gets the installation state for the backup of this state, if it exists.

    Returns:
      InstallationState, The state for this area or None if the backup does
      not exist.
    """
    if not self.HasBackup():
      return None
    return InstallationState(os.path.realpath(self.__backup_directory))

  @staticmethod
  def VersionForInstalledComponent(component_id):
    """Gets the version string for the given installed component.

    This function is to be used to get component versions for metrics
    reporting.  If it fails in any way or if the component_id is unknown,
    it will return None.  This prevents errors from surfacing when the
    version is needed strictly for reporting purposes.

    Args:
      component_id: str, The component id of the component you want the
        version for.

    Returns:
      str, The installed version of the component, or None if it is not
        installed or if an error occurs.
    """
    try:
      state = InstallationState.ForCurrent()
      # pylint: disable=protected-access, This is the same class.
      return InstallationManifest(
          state._state_directory, component_id).VersionString()
    # pylint: disable=bare-except, We never want to fail because of metrics.
    except:
      logging.debug('Failed to get installed version for component [%s]: [%s]',
                    component_id, sys.exc_info())
      return None

  @_RaisesPermissionsError
  def __init__(self, sdk_root):
    """Initializes the installation state for the given sdk install.

    Args:
      sdk_root: str, The file path of the root of the SDK installation.

    Raises:
      ValueError: If the given SDK root does not exist.
    """
    if not os.path.isdir(sdk_root):
      raise ValueError('The given Cloud SDK root does not exist: [{0}]'
                       .format(sdk_root))

    self.__sdk_root = console_attr.DecodeFromInput(sdk_root)
    self._state_directory = os.path.join(sdk_root,
                                         InstallationState.STATE_DIR_NAME)
    self.__backup_directory = os.path.join(self._state_directory,
                                           InstallationState.BACKUP_DIR_NAME)
    self.__trash_directory = os.path.join(self._state_directory,
                                          InstallationState.TRASH_DIR_NAME)

    # Staging lives next to the SDK root (sibling path), not inside it.
    self.__sdk_staging_root = (os.path.normpath(self.__sdk_root) +
                               InstallationState.STAGING_ROOT_SUFFIX)

  @_RaisesPermissionsError
  def _CreateStateDir(self):
    """Creates the state directory if it does not exist."""
    if not os.path.isdir(self._state_directory):
      file_utils.MakeDir(self._state_directory)

  @property
  def sdk_root(self):
    """Gets the root of the SDK that this state corresponds to.

    Returns:
      str, the path to the root directory.
    """
    return self.__sdk_root

  def _FilesForSuffix(self, suffix):
    """Returns the files in the state directory that have the given suffix.

    Args:
      suffix: str, The file suffix to match on.

    Returns:
      list of str, The file names that match.
    """
    if not os.path.isdir(self._state_directory):
      return []
    files = os.listdir(self._state_directory)
    matching = [f for f in files
                if os.path.isfile(os.path.join(self._state_directory, f))
                and f.endswith(suffix)]
    return matching

  @_RaisesPermissionsError
  def InstalledComponents(self):
    """Gets all the components that are currently installed.

    Returns:
      A dictionary of component id string to InstallationManifest.
    """
    snapshot_files = self._FilesForSuffix(
        InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)
    manifests = {}
    for f in snapshot_files:
      # The component id is the file name minus the snapshot suffix.
      component_id = f[:-len(InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)]
      manifests[component_id] = InstallationManifest(self._state_directory,
                                                     component_id)
    return manifests

  @_RaisesPermissionsError
  def Snapshot(self):
    """Generates a ComponentSnapshot from the currently installed components."""
    return snapshots.ComponentSnapshot.FromInstallState(self)

  def DiffCurrentState(self, latest_snapshot, platform_filter=None):
    """Generates a ComponentSnapshotDiff from current state and the given state.

    Args:
      latest_snapshot:  snapshots.ComponentSnapshot, The current state of the
        world to diff against.
      platform_filter: platforms.Platform, A platform that components must
        match in order to be considered for any operations.

    Returns:
      A ComponentSnapshotDiff.
    """
    return self.Snapshot().CreateDiff(latest_snapshot,
                                      platform_filter=platform_filter)

  @_RaisesPermissionsError
  def CloneToStaging(self, progress_callback=None):
    """Clones this state to the temporary staging area.

    This is used for making temporary copies of the entire Cloud SDK
    installation when doing updates.  The entire installation is cloned, but
    doing so removes any backups and trash from this state before doing the
    copy.

    Args:
      progress_callback: f(float), A function to call with the fraction of
        completeness.

    Returns:
      An InstallationState object for the cloned install.
    """
    self._CreateStateDir()
    # Split the overall progress bar across the four phases; the copy
    # dominates, hence the 7x weight.
    (rm_staging_cb, rm_backup_cb, rm_trash_cb, copy_cb) = (
        console_io.ProgressBar.SplitProgressBar(progress_callback,
                                                [1, 1, 1, 7]))

    self._ClearStaging(progress_callback=rm_staging_cb)
    self.ClearBackup(progress_callback=rm_backup_cb)
    self.ClearTrash(progress_callback=rm_trash_cb)

    class Counter(object):
      """Counts directories visited by copytree to drive the progress bar."""

      def __init__(self, progress_callback, total):
        self.count = 0
        self.progress_callback = progress_callback
        self.total = float(total)

      # This function must match the signature that shutil expects for the
      # ignore function.
      def Tick(self, *unused_args):
        self.count += 1
        self.progress_callback(self.count / self.total)
        return []

    if progress_callback:
      # This takes a little time, so only do it if we are going to report
      # progress.
      dirs = set()
      for _, manifest in self.InstalledComponents().iteritems():
        dirs.update(manifest.InstalledDirectories())
      # There is always the root directory itself and the .install directory.
      # In general, there could be more directories in the SDK (if people
      # just put stuff in there) but this is fine for an estimate.  The
      # progress bar will at worst stay at 100% for slightly longer.
      total_dirs = len(dirs) + 2
      ticker = Counter(copy_cb, total_dirs).Tick if total_dirs else None
    else:
      ticker = None

    shutil.copytree(self.__sdk_root, self.__sdk_staging_root, symlinks=True,
                    ignore=ticker)
    staging_state = InstallationState(self.__sdk_staging_root)
    # pylint: disable=protected-access, This is an instance of InstallationState
    staging_state._CreateStateDir()
    return staging_state

  @_RaisesPermissionsError
  def CreateStagingFromDownload(self, url, progress_callback=None):
    """Creates a new staging area from a fresh download of the Cloud SDK.

    Args:
      url: str, The url to download the new SDK from.
      progress_callback: f(float), A function to call with the fraction of
        completeness.

    Returns:
      An InstallationState object for the new install.

    Raises:
      installers.URLFetchError: If the new SDK could not be downloaded.
      InvalidDownloadError: If the new SDK was malformed.
    """
    self._ClearStaging()
    with file_utils.TemporaryDirectory() as t:
      download_dir = os.path.join(t, '.download')
      extract_dir = os.path.join(t, '.extract')
      installers.ComponentInstaller.DownloadAndExtractTar(
          url, download_dir, extract_dir, progress_callback=progress_callback,
          command_path='components.reinstall')
      files = os.listdir(extract_dir)
      # A valid SDK archive extracts to exactly one top-level directory.
      if len(files) != 1:
        raise InvalidDownloadError()
      sdk_root = os.path.join(extract_dir, files[0])
      file_utils.MoveDir(sdk_root, self.__sdk_staging_root)

    staging_sdk = InstallationState(self.__sdk_staging_root)
    # pylint: disable=protected-access, This is an instance of InstallationState
    staging_sdk._CreateStateDir()
    self.CopyMachinePropertiesTo(staging_sdk)
    return staging_sdk

  @_RaisesPermissionsError
  def ReplaceWith(self, other_install_state, progress_callback=None):
    """Replaces this installation with the given other installation.

    This moves the current installation to the backup directory of the other
    installation.  Then, it moves the entire second installation to replace
    this one on the file system.  The result is that the other installation
    completely replaces the current one, but the current one is snapshotted
    and stored as a backup under the new one (and can be restored later).

    Args:
      other_install_state: InstallationState, The other state with which to
        replace this one.
      progress_callback: f(float), A function to call with the fraction of
        completeness.
    """
    self._CreateStateDir()
    self.ClearBackup()
    self.ClearTrash()
    # pylint: disable=protected-access, This is an instance of InstallationState
    other_install_state._CreateStateDir()
    other_install_state.ClearBackup()
    # Accessing the other instance's name-mangled private attributes works
    # here because this code is inside InstallationState itself.
    # pylint: disable=protected-access, This is an instance of InstallationState
    file_utils.MoveDir(self.__sdk_root, other_install_state.__backup_directory)
    if progress_callback:
      progress_callback(0.5)
    file_utils.MoveDir(other_install_state.__sdk_root, self.__sdk_root)
    if progress_callback:
      progress_callback(1.0)

  @_RaisesPermissionsError
  def RestoreBackup(self):
    """Restore the backup from this install state if it exists.

    If this installation has a backup stored in it (created by and update
    that used ReplaceWith(), above), it replaces this installation with the
    backup, using a temporary staging area.  This installation is moved to
    the trash directory under the installation that exists after this is
    done.  The trash directory can be removed at any point in the future.
    We just don't want to delete code that is running since some platforms
    have a problem with that.

    Returns:
      bool, True if there was a backup to restore, False otherwise.
    """
    if not self.HasBackup():
      return False

    self._ClearStaging()

    file_utils.MoveDir(self.__backup_directory, self.__sdk_staging_root)
    staging_state = InstallationState(self.__sdk_staging_root)
    # pylint: disable=protected-access, This is an instance of InstallationState
    staging_state._CreateStateDir()
    staging_state.ClearTrash()
    # pylint: disable=protected-access, This is an instance of InstallationState
    file_utils.MoveDir(self.__sdk_root, staging_state.__trash_directory)
    file_utils.MoveDir(staging_state.__sdk_root, self.__sdk_root)
    return True

  def HasBackup(self):
    """Determines if this install has a valid backup that can be restored.

    Returns:
      bool, True if there is a backup, False otherwise.
    """
    return os.path.isdir(self.__backup_directory)

  def BackupDirectory(self):
    """Gets the backup directory of this installation if it exists.

    Returns:
      str, The path to the backup directory or None if it does not exist.
    """
    if self.HasBackup():
      return self.__backup_directory
    return None

  @_RaisesPermissionsError
  def _ClearStaging(self, progress_callback=None):
    """Deletes the current staging directory if it exists.

    Args:
      progress_callback: f(float), A function to call with the fraction of
        completeness.
    """
    if os.path.exists(self.__sdk_staging_root):
      file_utils.RmTree(self.__sdk_staging_root)
    if progress_callback:
      progress_callback(1)

  @_RaisesPermissionsError
  def ClearBackup(self, progress_callback=None):
    """Deletes the current backup if it exists.

    Args:
      progress_callback: f(float), A function to call with the fraction of
        completeness.
    """
    if os.path.isdir(self.__backup_directory):
      file_utils.RmTree(self.__backup_directory)
    if progress_callback:
      progress_callback(1)

  @_RaisesPermissionsError
  def ClearTrash(self, progress_callback=None):
    """Deletes the current trash directory if it exists.

    Args:
      progress_callback: f(float), A function to call with the fraction of
        completeness.
    """
    if os.path.isdir(self.__trash_directory):
      file_utils.RmTree(self.__trash_directory)
    if progress_callback:
      progress_callback(1)

  def _GetInstaller(self, snapshot):
    """Gets a component installer based on the given snapshot.

    Args:
      snapshot: snapshots.ComponentSnapshot, The snapshot that describes the
        component to install.

    Returns:
      The installers.ComponentInstaller.
    """
    return installers.ComponentInstaller(self.__sdk_root,
                                         self._state_directory,
                                         snapshot)

  @_RaisesPermissionsError
  def Install(self, snapshot, component_id, progress_callback=None,
              command_path='unknown'):
    """Installs the given component based on the given snapshot.

    Args:
      snapshot: snapshots.ComponentSnapshot, The snapshot that describes the
        component to install.
      component_id: str, The component to install from the given snapshot.
      progress_callback: f(float), A function to call with the fraction of
        completeness.
command_path: the command path to include in the User-Agent header if the URL is HTTP Raises: installers.URLFetchError: If the component associated with the provided component ID has a URL that is not fetched correctly. """ self._CreateStateDir() files = self._GetInstaller(snapshot).Install( component_id, progress_callback=progress_callback, command_path=command_path) manifest = InstallationManifest(self._state_directory, component_id) manifest.MarkInstalled(snapshot, files) @_RaisesPermissionsError def Uninstall(self, component_id, progress_callback=None): """Uninstalls the given component. Deletes all the files for this component and marks it as no longer being installed. Args: component_id: str, The id of the component to uninstall. progress_callback: f(float), A function to call with the fraction of completeness. """ manifest = InstallationManifest(self._state_directory, component_id) paths = manifest.InstalledPaths() total_paths = float(len(paths)) root = self.__sdk_root dirs_to_remove = set() for num, p in enumerate(paths, start=1): path = os.path.join(root, p) if os.path.isfile(path) or os.path.islink(path): os.remove(path) # Clean up the pyc files that correspond to any py files being removed. if p.endswith('.py'): pyc_path = path + 'c' if os.path.isfile(pyc_path): os.remove(pyc_path) dir_path = os.path.dirname(path) if dir_path: dirs_to_remove.add(os.path.normpath(dir_path)) elif os.path.isdir(path): dirs_to_remove.add(os.path.normpath(path)) if progress_callback: progress_callback(num / total_paths) # Remove dirs from the bottom up. Subdirs will always have a longer path # than it's parent. for d in sorted(dirs_to_remove, key=len, reverse=True): if os.path.isdir(d) and not os.path.islink(d) and not os.listdir(d): os.rmdir(d) manifest.MarkUninstalled() def CopyMachinePropertiesTo(self, other_state): """Copy this state's properties file to another state. 
This is primarily intended to be used to maintain the machine properties file during a schema-change-induced reinstall. Args: other_state: InstallationState, The installation state of the fresh Cloud SDK that needs the properties file mirrored in. """ my_properties = os.path.join( self.sdk_root, config.Paths.CLOUDSDK_PROPERTIES_NAME) other_properties = os.path.join( other_state.sdk_root, config.Paths.CLOUDSDK_PROPERTIES_NAME) if not os.path.exists(my_properties): return shutil.copyfile(my_properties, other_properties) def CompilePythonFiles(self): """Attempts to compile all the python files into .pyc files. This does not raise exceptions if compiling a given file fails. """ # The self.sdk_root pathname could contain unicode chars and py_compile # chokes on unicode paths. Using relative paths from self.sdk_root works # around the problem. with file_utils.ChDir(self.sdk_root): to_compile = [ os.path.join('bin', 'bootstrapping'), 'lib', 'platform', ] for d in to_compile: d = console_attr.DecodeFromInput(d) compileall.compile_dir(d, quiet=True) class InstallationManifest(object): """Class to encapsulate the data stored in installation manifest files.""" MANIFEST_SUFFIX = '.manifest' def __init__(self, state_dir, component_id): """Creates a new InstallationManifest. Args: state_dir: str, The directory path where install state is stored. component_id: str, The component id that you want to get the manifest for. """ self.state_dir = state_dir self.id = component_id self.snapshot_file = os.path.join( self.state_dir, component_id + InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX) self.manifest_file = os.path.join( self.state_dir, component_id + InstallationManifest.MANIFEST_SUFFIX) def MarkInstalled(self, snapshot, files): """Marks this component as installed with the given snapshot and files. This saves the ComponentSnapshot and writes the installed files to a manifest so they can be removed later. 
Args: snapshot: snapshots.ComponentSnapshot, The snapshot that was the source of the install. files: list of str, The files that were created by the installation. """ with open(self.manifest_file, 'w') as fp: for f in files: fp.write(f + '\n') snapshot.WriteToFile(self.snapshot_file) def MarkUninstalled(self): """Marks this component as no longer being installed. This does not actually uninstall the component, but rather just removes the snapshot and manifest. """ for f in [self.manifest_file, self.snapshot_file]: if os.path.isfile(f): os.remove(f) def ComponentSnapshot(self): """Loads the local ComponentSnapshot for this component. Returns: The snapshots.ComponentSnapshot for this component. """ return snapshots.ComponentSnapshot.FromFile(self.snapshot_file) def ComponentDefinition(self): """Loads the ComponentSnapshot and get the schemas.Component this component. Returns: The schemas.Component for this component. """ return self.ComponentSnapshot().ComponentFromId(self.id) def VersionString(self): """Gets the version string of this component as it was installed. Returns: str, The installed version of this component. """ return self.ComponentDefinition().version.version_string def InstalledPaths(self): """Gets the list of files and dirs created by installing this component. Returns: list of str, The files and directories installed by this component. """ with open(self.manifest_file) as f: files = [line.rstrip() for line in f] return files def InstalledDirectories(self): """Gets the set of directories created by installing this component. Returns: set(str), The directories installed by this component. """ with open(self.manifest_file) as f: dirs = set() for line in f: fixed = line.rstrip() if fixed.endswith('/'): dirs.add(fixed) return dirs
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyTorch implementation of a simple 2-layer-deep LSTM for genre classification
of musical audio.

Feeding the LSTM stack are spectral {centroid, contrast}, chromagram & MFCC
features (33 total values).

Question: Why is there a PyTorch implementation, when we already have
Keras/Tensorflow?  Answer: So that we can learn more PyTorch and experiment
with modulations on basic architectures within the space of an "easy problem".
For example, SRU or SincNets.  I am also curious about the relative
performances of both toolkits.
"""
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


# class definition
class LSTM(nn.Module):
    """Stacked LSTM -> linear -> log-softmax classifier over genre classes.

    The log-softmax output is intended to be paired with nn.NLLLoss.
    """

    def __init__(self, input_dim, hidden_dim, batch_size, output_dim=8, num_layers=2):
        """
        Args:
            input_dim: int, number of features per timestep (33 here).
            hidden_dim: int, width of each LSTM layer's hidden state.
            batch_size: int, nominal minibatch size (retained for backward
                compatibility; accuracy is computed from the actual batch).
            output_dim: int, number of genre classes.
            num_layers: int, depth of the LSTM stack.
        """
        super(LSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers

        # setup LSTM layer
        self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers)

        # setup output layer
        self.linear = nn.Linear(self.hidden_dim, output_dim)

    def forward(self, input, hidden=None):
        """Classify a batch of sequences.

        Args:
            input: tensor of shape (sequence_length, batch, input_dim).
            hidden: optional (h_0, c_0) state tuple for a stateful LSTM.

        Returns:
            (genre_scores, hidden): log-probabilities of shape
            (batch, output_dim) and the LSTM's final (h_n, c_n) state.
        """
        # lstm step => then ONLY take the sequence's final timestep to pass
        # into the linear/dense layer.
        # Note: lstm_out contains outputs for every step of the sequence we
        # are looping over (for BPTT), but we just need the output of the
        # last step of the sequence, aka lstm_out[-1].
        lstm_out, hidden = self.lstm(input, hidden)
        logits = self.linear(lstm_out[-1])
        # equivalent to return_sequences=False from Keras
        genre_scores = F.log_softmax(logits, dim=1)
        return genre_scores, hidden

    def get_accuracy(self, logits, target):
        """Compute accuracy (in percent) for one training round."""
        corrects = (
            torch.max(logits, 1)[1].view(target.size()).data == target.data
        ).sum()
        # Fix: divide by the actual number of targets rather than the nominal
        # self.batch_size, so a short final minibatch is scored correctly.
        # Identical result for full-sized batches.
        accuracy = 100.0 * corrects / target.size(0)
        return accuracy.item()


def main():
    # Deferred imports: matplotlib and the local librosa-based feature
    # extractor are needed only when running as a script; keeping them out of
    # module scope lets the LSTM class be imported without them installed.
    import matplotlib.pyplot as plt
    from GenreFeatureData import (
        GenreFeatureData,
    )  # local python class with Audio feature extraction (librosa)

    genre_features = GenreFeatureData()

    # if all of the preprocessed files do not exist, regenerate them all for self-consistency
    if (
        os.path.isfile(genre_features.train_X_preprocessed_data)
        and os.path.isfile(genre_features.train_Y_preprocessed_data)
        and os.path.isfile(genre_features.dev_X_preprocessed_data)
        and os.path.isfile(genre_features.dev_Y_preprocessed_data)
        and os.path.isfile(genre_features.test_X_preprocessed_data)
        and os.path.isfile(genre_features.test_Y_preprocessed_data)
    ):
        print("Preprocessed files exist, deserializing npy files")
        genre_features.load_deserialize_data()
    else:
        print("Preprocessing raw audio files")
        genre_features.load_preprocess_data()

    train_X = torch.from_numpy(genre_features.train_X).type(torch.Tensor)
    dev_X = torch.from_numpy(genre_features.dev_X).type(torch.Tensor)
    test_X = torch.from_numpy(genre_features.test_X).type(torch.Tensor)

    # Targets is a long tensor of size (N,) which tells the true class of the sample.
    train_Y = torch.from_numpy(genre_features.train_Y).type(torch.LongTensor)
    dev_Y = torch.from_numpy(genre_features.dev_Y).type(torch.LongTensor)
    test_Y = torch.from_numpy(genre_features.test_Y).type(torch.LongTensor)

    # Convert {training, test} torch.Tensors
    print("Training X shape: " + str(genre_features.train_X.shape))
    print("Training Y shape: " + str(genre_features.train_Y.shape))
    print("Validation X shape: " + str(genre_features.dev_X.shape))
    print("Validation Y shape: " + str(genre_features.dev_Y.shape))
    print("Test X shape: " + str(genre_features.test_X.shape))
    print("Test Y shape: " + str(genre_features.test_Y.shape))

    batch_size = 35  # num of training examples per minibatch
    num_epochs = 400

    # Define model
    print("Build LSTM RNN model ...")
    model = LSTM(
        input_dim=33, hidden_dim=128, batch_size=batch_size, output_dim=8, num_layers=2
    )
    loss_function = nn.NLLLoss()  # expects outputs from LogSoftmax
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # To keep LSTM stateful between batches, you can set stateful = True,
    # which is not suggested for training.
    stateful = False

    train_on_gpu = torch.cuda.is_available()
    if train_on_gpu:
        print("\nTraining on GPU")
    else:
        print("\nNo GPU, training on CPU")

    # Fix: actually place the model and each minibatch on the detected
    # device; previously the GPU was reported but never used. On a CPU-only
    # machine this is a no-op, so behavior there is unchanged.
    device = torch.device("cuda" if train_on_gpu else "cpu")
    model = model.to(device)

    # all training data (epoch) / batch_size == num_batches (12)
    num_batches = int(train_X.shape[0] / batch_size)
    num_dev_batches = int(dev_X.shape[0] / batch_size)

    val_loss_list, val_accuracy_list, epoch_list = [], [], []

    print("Training ...")
    for epoch in range(num_epochs):

        train_running_loss, train_acc = 0.0, 0.0

        # Init hidden state - if you don't want a stateful LSTM (between epochs)
        hidden_state = None
        for i in range(num_batches):

            # zero out gradient, so they don't accumulate btw batches
            model.zero_grad()

            # train_X shape: (total # of training examples, sequence_length, input_dim)
            # train_Y shape: (total # of training examples, # output classes)
            #
            # Slice out local minibatches & labels => Note that we *permute* the local minibatch to
            # match the PyTorch expected input tensor format of (sequence_length, batch size, input_dim)
            X_local_minibatch, y_local_minibatch = (
                train_X[i * batch_size: (i + 1) * batch_size, ],
                train_Y[i * batch_size: (i + 1) * batch_size, ],
            )

            # Reshape input & targets to "match" what the loss_function wants
            X_local_minibatch = X_local_minibatch.permute(1, 0, 2).to(device)

            # NLLLoss does not expect a one-hot encoded vector as the target, but class indices
            y_local_minibatch = torch.max(y_local_minibatch, 1)[1].to(device)

            y_pred, hidden_state = model(X_local_minibatch, hidden_state)  # forward pass

            # Stateful = False for training. Do we go Stateful = True during inference/prediction time?
            if not stateful:
                hidden_state = None
            else:
                # Detach so BPTT does not flow across minibatch boundaries.
                h_0, c_0 = hidden_state
                h_0.detach_(), c_0.detach_()
                hidden_state = (h_0, c_0)

            loss = loss_function(y_pred, y_local_minibatch)  # compute loss
            loss.backward()  # backward pass
            optimizer.step()  # parameter update

            train_running_loss += loss.detach().item()  # unpacks the tensor into a scalar value
            train_acc += model.get_accuracy(y_pred, y_local_minibatch)

        print(
            "Epoch: %d | NLLoss: %.4f | Train Accuracy: %.2f"
            % (epoch, train_running_loss / num_batches, train_acc / num_batches)
        )

        if epoch % 10 == 0:
            print("Validation ...")  # should this be done every N=10 epochs
            val_running_loss, val_acc = 0.0, 0.0

            # Compute validation loss, accuracy. Use torch.no_grad() & model.eval()
            with torch.no_grad():
                model.eval()

                hidden_state = None
                for i in range(num_dev_batches):
                    X_local_validation_minibatch, y_local_validation_minibatch = (
                        dev_X[i * batch_size: (i + 1) * batch_size, ],
                        dev_Y[i * batch_size: (i + 1) * batch_size, ],
                    )
                    X_local_minibatch = X_local_validation_minibatch.permute(
                        1, 0, 2
                    ).to(device)
                    y_local_minibatch = torch.max(
                        y_local_validation_minibatch, 1
                    )[1].to(device)

                    y_pred, hidden_state = model(X_local_minibatch, hidden_state)
                    if not stateful:
                        hidden_state = None

                    val_loss = loss_function(y_pred, y_local_minibatch)
                    val_running_loss += (
                        val_loss.detach().item()
                    )  # unpacks the tensor into a scalar value
                    val_acc += model.get_accuracy(y_pred, y_local_minibatch)

                model.train()  # reset to train mode after iterating through validation data

            print(
                "Epoch: %d | NLLoss: %.4f | Train Accuracy: %.2f | Val Loss %.4f | Val Accuracy: %.2f"
                % (
                    epoch,
                    train_running_loss / num_batches,
                    train_acc / num_batches,
                    val_running_loss / num_dev_batches,
                    val_acc / num_dev_batches,
                )
            )
            epoch_list.append(epoch)
            val_accuracy_list.append(val_acc / num_dev_batches)
            val_loss_list.append(val_running_loss / num_dev_batches)

    # visualization loss
    plt.plot(epoch_list, val_loss_list)
    plt.xlabel("# of epochs")
    plt.ylabel("Loss")
    plt.title("LSTM: Loss vs # epochs")
    plt.show()

    # visualization accuracy
    plt.plot(epoch_list, val_accuracy_list, color="red")
    plt.xlabel("# of epochs")
    plt.ylabel("Accuracy")
    plt.title("LSTM: Accuracy vs # epochs")
    # plt.savefig('graph.png')
    plt.show()


if __name__ == "__main__":
    main()
"""Tests for TimedeltaIndex: indexing, arithmetic, conversions and slicing."""
import pytest

import numpy as np
from datetime import timedelta

import pandas as pd
import pandas.util.testing as tm
from pandas import (timedelta_range, date_range, Series, Timedelta,
                    DatetimeIndex, TimedeltaIndex, Index, DataFrame,
                    Int64Index)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
                                 assert_index_equal)

from ..datetimelike import DatetimeLike

randn = np.random.randn


class TestTimedeltaIndex(DatetimeLike):
    _holder = TimedeltaIndex
    _multiprocess_can_split_ = True

    def setup_method(self, method):
        self.indices = dict(index=tm.makeTimedeltaIndex(10))
        self.setup_indices()

    def create_index(self):
        return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)

    def test_shift(self):
        # test shift for TimedeltaIndex
        # err8083
        drange = self.create_index()
        result = drange.shift(1)
        expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
                                   '3 days 01:00:00', '4 days 01:00:00',
                                   '5 days 01:00:00'], freq='D')
        tm.assert_index_equal(result, expected)

        result = drange.shift(3, freq='2D 1s')
        expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
                                   '8 days 01:00:03', '9 days 01:00:03',
                                   '10 days 01:00:03'], freq='D')
        tm.assert_index_equal(result, expected)

    def test_get_loc(self):
        idx = pd.to_timedelta(['0 days', '1 days', '2 days'])

        for method in [None, 'pad', 'backfill', 'nearest']:
            assert idx.get_loc(idx[1], method) == 1
            assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1
            assert idx.get_loc(str(idx[1]), method) == 1

        assert idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)) == 1
        assert idx.get_loc(idx[1], 'pad',
                           tolerance=np.timedelta64(0, 's')) == 1
        assert idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)) == 1

        with tm.assert_raises_regex(ValueError,
                                    'unit abbreviation w/o a number'):
            idx.get_loc(idx[1], method='nearest', tolerance='foo')

        with pytest.raises(
                ValueError,
                match='tolerance size must match'):
            idx.get_loc(idx[1], method='nearest',
                        tolerance=[Timedelta(0).to_timedelta64(),
                                   Timedelta(0).to_timedelta64()])

        for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
            assert idx.get_loc('1 day 1 hour', method) == loc

        # GH 16909
        assert idx.get_loc(idx[1].to_timedelta64()) == 1

        # GH 16896
        assert idx.get_loc('0 days') == 0

    def test_get_loc_nat(self):
        tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])

        assert tidx.get_loc(pd.NaT) == 1
        assert tidx.get_loc(None) == 1
        assert tidx.get_loc(float('nan')) == 1
        assert tidx.get_loc(np.nan) == 1

    def test_get_indexer(self):
        idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
        tm.assert_numpy_array_equal(idx.get_indexer(idx),
                                    np.array([0, 1, 2], dtype=np.intp))

        target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
                                    np.array([-1, 0, 1], dtype=np.intp))
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
                                    np.array([0, 1, 2], dtype=np.intp))
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
                                    np.array([0, 1, 1], dtype=np.intp))

        res = idx.get_indexer(target, 'nearest',
                              tolerance=pd.Timedelta('1 hour'))
        tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))

    def test_numeric_compat(self):
        idx = self._holder(np.arange(5, dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
        result = idx * 1
        tm.assert_index_equal(result, idx)

        result = 1 * idx
        tm.assert_index_equal(result, idx)

        result = idx / 1
        tm.assert_index_equal(result, idx)

        result = idx // 1
        tm.assert_index_equal(result, idx)

        result = idx * np.array(5, dtype='int64')
        tm.assert_index_equal(result,
                              self._holder(np.arange(5, dtype='int64') * 5))

        result = idx * np.arange(5, dtype='int64')
        tm.assert_index_equal(result, didx)

        result = idx * Series(np.arange(5, dtype='int64'))
        tm.assert_index_equal(result, didx)

        result = idx * Series(np.arange(5, dtype='float64') + 0.1)
        tm.assert_index_equal(result, self._holder(np.arange(
            5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))

        # invalid
        pytest.raises(TypeError, lambda: idx * idx)
        pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3)))
        pytest.raises(ValueError, lambda: idx * np.array([1, 2]))

    def test_pickle_compat_construction(self):
        pass

    def test_ufunc_coercions(self):
        # normal ops are also tested in tseries/test_timedeltas.py
        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
                             freq='2H', name='x')

        for result in [idx * 2, np.multiply(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
                                 freq='4H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '4H'

        for result in [idx / 2, np.divide(idx, 2)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
                                 freq='H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == 'H'

        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
                             freq='2H', name='x')
        for result in [-idx, np.negative(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
                                 freq='-2H', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '-2H'

        idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
                             freq='H', name='x')
        for result in [abs(idx), np.absolute(idx)]:
            assert isinstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
                                 freq=None, name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq is None

    def test_fillna_timedelta(self):
        # GH 11343
        idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])

        exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
        tm.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)

        exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
        # Fix: the fillna result was computed but never compared against the
        # expected index, so this case was silently not tested.
        tm.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)

        exp = pd.Index(
            [pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
        tm.assert_index_equal(idx.fillna('x'), exp)

    def test_difference_freq(self):
        # GH14323: Difference of TimedeltaIndex should not preserve frequency
        index = timedelta_range("0 days", "5 days", freq="D")

        other = timedelta_range("1 days", "4 days", freq="D")
        expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
        idx_diff = index.difference(other)
        tm.assert_index_equal(idx_diff, expected)
        tm.assert_attr_equal('freq', idx_diff, expected)

        other = timedelta_range("2 days", "5 days", freq="D")
        idx_diff = index.difference(other)
        expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
        tm.assert_index_equal(idx_diff, expected)
        tm.assert_attr_equal('freq', idx_diff, expected)

    def test_take(self):
        tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']
        idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx')
        expected = TimedeltaIndex(tds, freq=None, name='idx')

        taken1 = idx.take([2, 4, 10])
        taken2 = idx[[2, 4, 10]]

        for taken in [taken1, taken2]:
            tm.assert_index_equal(taken, expected)
            assert isinstance(taken, TimedeltaIndex)
            assert taken.freq is None
            assert taken.name == expected.name

    def test_take_fill_value(self):
        # GH 12631
        idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'], name='xxx')
        result = idx.take(np.array([1, 0, -1]))
        expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
                                     name='xxx')
        tm.assert_index_equal(result, expected)

        # fill_value
        result = idx.take(np.array([1, 0, -1]), fill_value=True)
        expected = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'], name='xxx')
        tm.assert_index_equal(result, expected)

        # allow_fill=False
        result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                          fill_value=True)
        expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
                                     name='xxx')
        tm.assert_index_equal(result, expected)

        msg = ('When allow_fill=True and fill_value is not None, '
               'all indices must be >= -1')
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -2]), fill_value=True)
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -5]), fill_value=True)

        with pytest.raises(IndexError):
            idx.take(np.array([1, -5]))

    def test_isin(self):
        index = tm.makeTimedeltaIndex(4)
        result = index.isin(index)
        assert result.all()

        result = index.isin(list(index))
        assert result.all()

        assert_almost_equal(index.isin([index[2], 5]),
                            np.array([False, False, True, False]))

    def test_factorize(self):
        idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',
                               '3 day'])

        exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
        exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])

        arr, idx = idx1.factorize()
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, exp_idx)

        arr, idx = idx1.factorize(sort=True)
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, exp_idx)

        # freq must be preserved
        idx3 = timedelta_range('1 day', periods=4, freq='s')
        exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
        arr, idx = idx3.factorize()
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, idx3)

    def test_join_self(self):
        index = timedelta_range('1 day', periods=10)
        kinds = 'outer', 'inner', 'left', 'right'
        for kind in kinds:
            joined = index.join(index, how=kind)
            tm.assert_index_equal(index, joined)

    def test_slice_keeps_name(self):
        # GH4226
        dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
        assert dr[1:].name == dr.name

    def test_does_not_convert_mixed_integer(self):
        df = tm.makeCustomDataframe(10, 10,
                                    data_gen_f=lambda *args, **kwargs: randn(),
                                    r_idx_type='i', c_idx_type='td')
        str(df)

        cols = df.columns.join(df.index, how='outer')
        joined = cols.join(df.columns)
        assert cols.dtype == np.dtype('O')
        assert cols.dtype == joined.dtype
        tm.assert_index_equal(cols, joined)

    def test_sort_values(self):
        idx = TimedeltaIndex(['4d', '1d', '2d'])

        ordered = idx.sort_values()
        assert ordered.is_monotonic

        ordered = idx.sort_values(ascending=False)
        assert ordered[::-1].is_monotonic

        ordered, dexer = idx.sort_values(return_indexer=True)
        assert ordered.is_monotonic

        tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]),
                                    check_dtype=False)

        ordered, dexer = idx.sort_values(return_indexer=True,
                                         ascending=False)
        assert ordered[::-1].is_monotonic

        tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]),
                                    check_dtype=False)

    def test_get_duplicates(self):
        idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',
                              '4day'])

        result = idx.get_duplicates()
        ex = TimedeltaIndex(['2 day', '3day'])
        tm.assert_index_equal(result, ex)

    def test_argmin_argmax(self):
        idx = TimedeltaIndex(['1 day 00:00:05', '1 day 00:00:01',
                              '1 day 00:00:02'])
        assert idx.argmin() == 1
        assert idx.argmax() == 0

    def test_misc_coverage(self):

        rng = timedelta_range('1 day', periods=5)
        result = rng.groupby(rng.days)
        assert isinstance(list(result.values())[0][0], Timedelta)

        idx = TimedeltaIndex(['3d', '1d', '2d'])
        assert not idx.equals(list(idx))

        non_td = Index(list('abc'))
        assert not idx.equals(list(non_td))

    def test_map(self):
        rng = timedelta_range('1 day', periods=10)

        f = lambda x: x.days
        result = rng.map(f)
        exp = Int64Index([f(x) for x in rng])
        tm.assert_index_equal(result, exp)

    def test_comparisons_nat(self):
        tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
                                    '1 day 00:00:01', '5 day 00:00:03'])
        tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
                                    '1 day 00:00:02', '5 days 00:00:03'])
        tdarr = np.array([np.timedelta64(2, 'D'),
                          np.timedelta64(2, 'D'), np.timedelta64('nat'),
                          np.timedelta64('nat'),
                          np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
                          np.timedelta64(5, 'D') + np.timedelta64(3, 's')])

        cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]

        # Check that pd.NaT is handled the same as np.nan
        for idx1, idx2 in cases:

            result = idx1 < idx2
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)

            result = idx2 > idx1
            expected = np.array([True, False, False, False, True, False])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 <= idx2
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx2 >= idx1
            expected = np.array([True, False, False, False, True, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 == idx2
            expected = np.array([False, False, False, False, False, True])
            tm.assert_numpy_array_equal(result, expected)

            result = idx1 != idx2
            expected = np.array([True, True, True, True, True, False])
            tm.assert_numpy_array_equal(result, expected)

    def test_comparisons_coverage(self):
        rng = timedelta_range('1 days', periods=10)

        result = rng < rng[3]
        exp = np.array([True, True, True] + [False] * 7)
        tm.assert_numpy_array_equal(result, exp)

        # raise TypeError for now
        pytest.raises(TypeError, rng.__lt__, rng[3].value)

        result = rng == list(rng)
        exp = rng == rng
        tm.assert_numpy_array_equal(result, exp)

    def test_total_seconds(self):
        # GH 10939
        # test index
        rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
                              freq='s')
        expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
                1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
        tm.assert_almost_equal(rng.total_seconds(), Index(expt))

        # test Series
        s = Series(rng)
        s_expt = Series(expt, index=[0, 1])
        tm.assert_series_equal(s.dt.total_seconds(), s_expt)

        # with nat
        s[1] = np.nan
        s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
                         12 + 100123456. / 1e9, np.nan], index=[0, 1])
        tm.assert_series_equal(s.dt.total_seconds(), s_expt)

        # with both nat
        s = Series([np.nan, np.nan], dtype='timedelta64[ns]')
        tm.assert_series_equal(s.dt.total_seconds(),
                               Series([np.nan, np.nan], index=[0, 1]))

    def test_pass_TimedeltaIndex_to_index(self):
        rng = timedelta_range('1 days', '10 days')
        idx = Index(rng, dtype=object)

        expected = Index(rng.to_pytimedelta(), dtype=object)

        tm.assert_numpy_array_equal(idx.values, expected.values)

    def test_pickle(self):
        rng = timedelta_range('1 days', periods=10)
        rng_p = tm.round_trip_pickle(rng)
        tm.assert_index_equal(rng, rng_p)

    def test_hash_error(self):
        index = timedelta_range('1 days', periods=10)
        with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
                                    type(index).__name__):
            hash(index)

    def test_append_join_nondatetimeindex(self):
        rng = timedelta_range('1 days', periods=10)
        idx = Index(['a', 'b', 'c', 'd'])

        result = rng.append(idx)
        assert isinstance(result[0], Timedelta)

        # it works
        rng.join(idx, how='outer')

    def test_append_numpy_bug_1681(self):

        td = timedelta_range('1 days', '10 days', freq='2D')
        a = DataFrame()
        c = DataFrame({'A': 'foo', 'B': td}, index=td)
        str(c)

        result = a.append(c)
        assert (result['B'] == td).all()

    def test_fields(self):
        rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
                              freq='s')
        tm.assert_index_equal(rng.days, Index([1, 1], dtype='int64'))
        tm.assert_index_equal(
            rng.seconds,
            Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
                  dtype='int64'))
        tm.assert_index_equal(
            rng.microseconds,
            Index([100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
        tm.assert_index_equal(rng.nanoseconds,
                              Index([456, 456], dtype='int64'))

        pytest.raises(AttributeError, lambda: rng.hours)
        pytest.raises(AttributeError, lambda: rng.minutes)
        pytest.raises(AttributeError, lambda: rng.milliseconds)

        # with nat
        s = Series(rng)
        s[1] = np.nan

        tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
        tm.assert_series_equal(s.dt.seconds, Series(
            [10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))

        # preserve name (GH15589)
        rng.name = 'name'
        assert rng.days.name == 'name'

    def test_freq_conversion(self):

        # doc example

        # series
        td = Series(date_range('20130101', periods=4)) - \
            Series(date_range('20121201', periods=4))
        td[2] += timedelta(minutes=5, seconds=3)
        td[3] = np.nan

        result = td / np.timedelta64(1, 'D')
        expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0,
                           np.nan])
        assert_series_equal(result, expected)

        result = td.astype('timedelta64[D]')
        expected = Series([31, 31, 31, np.nan])
        assert_series_equal(result, expected)

        result = td / np.timedelta64(1, 's')
        expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
                           np.nan])
        assert_series_equal(result, expected)

        result = td.astype('timedelta64[s]')
        assert_series_equal(result, expected)

        # tdi
        td = TimedeltaIndex(td)

        result = td / np.timedelta64(1, 'D')
        expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0,
                          np.nan])
        assert_index_equal(result, expected)

        result = td.astype('timedelta64[D]')
        expected = Index([31, 31, 31, np.nan])
        assert_index_equal(result, expected)

        result = td / np.timedelta64(1, 's')
        expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
                          np.nan])
        assert_index_equal(result, expected)

        result = td.astype('timedelta64[s]')
        assert_index_equal(result, expected)


class TestSlicing(object):

    @pytest.mark.parametrize('freq', ['B', 'D'])
    def test_timedelta(self, freq):
        index = date_range('1/1/2000', periods=50, freq=freq)

        shifted = index + timedelta(1)
        back = shifted + timedelta(-1)
        tm.assert_index_equal(index, back)

        if freq == 'D':
            expected = pd.tseries.offsets.Day(1)
            assert index.freq == expected
            assert shifted.freq == expected
            assert back.freq == expected
        else:  # freq == 'B'
            assert index.freq == pd.tseries.offsets.BusinessDay(1)
            assert shifted.freq is None
            assert back.freq == pd.tseries.offsets.BusinessDay(1)

        result = index - timedelta(1)
        expected = index + timedelta(-1)
        tm.assert_index_equal(result, expected)

        # GH4134, buggy with timedeltas
        rng = date_range('2013', '2014')
        s = Series(rng)
        result1 = rng - pd.offsets.Hour(1)
        result2 = DatetimeIndex(s - np.timedelta64(100000000))
        result3 = rng - np.timedelta64(100000000)
        result4 = DatetimeIndex(s - pd.offsets.Hour(1))
        tm.assert_index_equal(result1, result4)
        tm.assert_index_equal(result2, result3)


class TestTimeSeries(object):
    _multiprocess_can_split_ = True

    def test_series_box_timedelta(self):
        rng = timedelta_range('1 day 1 s', periods=5, freq='h')
        s = Series(rng)
        assert isinstance(s[1], Timedelta)
        assert isinstance(s.iat[2], Timedelta)
"""Refresh the GTFS feeds listed in a city's YAML front-matter file.

Given a city directory, this script reads ``<cityid>.md``, downloads every
GTFS feed referenced in its ``gtfs`` mapping (by direct URL, transitfeeds.com
feed id, local file, or custom shell script), stores the zips under ``data/``,
updates the front matter (sha256 sums, timestamps, version), and finally
invokes the mapnificent generator when anything changed.
"""
from datetime import datetime, timedelta
import os
import sys
import subprocess
import hashlib
import re
from io import BytesIO
from contextlib import closing

import yaml
import requests
from tqdm import tqdm

from .utils import ydump, mkdir_p, slugify

TRANSITFEEDS_API_URL = 'https://api.transitfeeds.com/v1/'


def get_transitfeeds_locations(location_ids):
    """Yield transitfeeds.com feed ids for all GTFS feeds of the locations.

    Only the first page (up to 50 feeds) per location is fetched; the API
    key is taken from the ``TRANSITFEED_API_KEY`` environment variable.
    """
    for location_id in location_ids:
        response = requests.get(TRANSITFEEDS_API_URL + 'getFeeds', params={
            'key': os.environ.get('TRANSITFEED_API_KEY'),
            'location': location_id,
            'descendants': 1,
            'page': 1,
            'limit': 50,
            'type': 'gtfs'
        })
        for feed in response.json()['results']['feeds']:
            yield feed['id']


def download_transitfeeds_feed(feed_id):
    """Download the latest version of a transitfeeds.com feed.

    Returns ``(content, final_url)``; ``(None, None)`` when the API reports
    an error (signalled by ``download_url`` raising ``ValueError``).
    """
    try:
        return download_url(TRANSITFEEDS_API_URL + 'getLatestFeedVersion', params={
            'key': os.environ.get('TRANSITFEED_API_KEY'),
            'feed': feed_id
        })
    except ValueError:
        return None, None


def download_with_script(script, basepath):
    """Run a shell download script in *basepath*.

    The script must print the path of the produced file as its last line of
    stdout (relative to *basepath*). Returns ``(BytesIO, None)``.

    Raises RuntimeError when the script prints nothing (previously this
    crashed with a bare IndexError on ``splitlines()[-1]``).
    """
    print('Starting script', script)
    proc = subprocess.Popen(script, shell=True, cwd=basepath,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    outs, errs = proc.communicate()
    out_lines = outs.decode('utf-8').splitlines()
    if not out_lines:
        raise RuntimeError('Download script produced no output: {}'.format(
            errs.decode('utf-8', 'replace')))
    out_file = out_lines[-1]
    print('Saving', out_file)
    out_file = os.path.join(basepath, out_file)
    with open(out_file, 'rb') as f:
        return BytesIO(f.read()), None


def download_url(url, params=None):
    """Stream *url* into memory with a progress bar.

    Raises ValueError when the server answers with JSON — the transitfeeds
    API reports errors that way instead of an HTTP error status.
    Returns ``(BytesIO, final_url)`` where *final_url* is the URL after
    redirects (used to resolve permalinks).
    """
    response_content = BytesIO()
    with closing(requests.get(url, params=params, stream=True)) as response:
        if response.headers.get('Content-Type') == 'application/json':
            raise ValueError('API error')
        size = None
        if response.headers.get('Content-Length'):
            size = int(response.headers.get('Content-Length').strip())
        with tqdm(total=size, unit='B', unit_scale=True) as pbar:
            for buf in response.iter_content(1024):
                response_content.write(buf)
                pbar.update(len(buf))
        url = response.url
    return response_content, url


def main(path):
    """Update all GTFS feeds for the city directory at *path*."""
    basepath = os.path.abspath(path)
    cityid = os.path.basename(basepath)
    filename = os.path.join(basepath, '{cityid}.md'.format(cityid=cityid))
    if not os.path.exists(filename):
        print('Filename does not exist', filename)
        return
    with open(filename) as f:
        # Only the first YAML document (the front matter) is used.
        # FIX: yaml.load_all without an explicit Loader is unsafe/deprecated;
        # the front matter is plain data, so the safe loader is sufficient.
        for doc in yaml.safe_load_all(f):
            data = doc
            break

    data_path = os.path.join(path, 'data')
    mkdir_p(data_path)

    now = datetime.utcnow()
    age = timedelta(days=7)
    new_files = False

    # Expand transitfeeds location ids into individual feed entries.
    tf_location_ids = data.get('tf_location_ids')
    if tf_location_ids is not None:
        data.setdefault('gtfs', {})
        feed_ids = get_transitfeeds_locations(tf_location_ids)
        for feed_id in feed_ids:
            slug_feed_id = slugify(feed_id)
            data['gtfs'][slug_feed_id] = {'tf_feed_id': feed_id}

    if 'gtfs' not in data:
        print('No GTFS key', filename)
        return
    gtfs = data['gtfs']
    if not gtfs:
        print('No GTFS feeds present', filename)

    for key in gtfs.keys():
        print('Checking', key)
        info = gtfs[key]
        if isinstance(info, str):
            # Plain-URL shorthand; normalize to a dict entry in place.
            gtfs[key] = {'url': info}
            info = gtfs[key]
        current_hash_sum = info.get('sha256')
        data_file_path = os.path.join(data_path, '{}.zip'.format(key))
        if os.path.exists(data_file_path):
            # Skip feeds downloaded within the last week.
            statinfo = os.stat(data_file_path)
            last_modified = datetime.fromtimestamp(statinfo.st_mtime)
            if last_modified + age > now:
                print(f'Skipping {key}, less than 7 days old...')
                continue
        # Download strategy, in priority order: script, transitfeeds id,
        # direct URL, pre-existing local file.
        if info.get('script') is not None:
            response_content, final_url = download_with_script(info['script'], basepath)
        elif info.get('tf_feed_id') is not None:
            response_content, final_url = download_transitfeeds_feed(info['tf_feed_id'])
        elif info.get('url'):
            url = info['url']
            response_content, final_url = download_url(url)
        elif info.get('file'):
            final_url = None
            with open(data_file_path, 'rb') as f:
                response_content = BytesIO(f.read())
        else:
            print(f'Cannot update {key}, skipping...')
            continue
        if response_content is None:
            print(f'Update {key} failed, skipping...')
            continue
        if final_url is not None and not info.get('url'):
            # Keep permalinks that redirect
            info['url'] = final_url
        hashsum = hashlib.sha256()
        hashsum.update(response_content.getvalue())
        hexsum = hashsum.hexdigest()
        # Save only when the content changed or the zip is missing.
        if current_hash_sum != hexsum or not os.path.exists(data_file_path):
            print(hexsum)
            info['sha256'] = hexsum
            new_files = True
            print('Saving', key)
            with open(data_file_path, 'wb') as f:
                f.write(response_content.getvalue())

    if new_files or data.get('northwest'):
        # Cleanup old meta data fields
        data.pop('northwest', None)
        data.pop('southeast', None)
        lat = data.pop('lat', None)
        lng = data.pop('lng', None)
        if lat is not None:
            data['coordinates'] = [lng, lat]
        if not data.get('hidden', True):
            data.pop('hidden', None)
        if data.get('active', False):
            data.pop('active', None)
        if 'added' not in data:
            data['added'] = now.isoformat()
        data['changed'] = now.isoformat()
        data['version'] = data.get('version', 0) + 1
        # Rewrite only the YAML front-matter part of the markdown file.
        yaml_part = ydump(data).decode('utf-8')
        with open(filename) as f:
            contents = f.read()
        parts = contents.split('---\n')
        parts[1] = yaml_part
        contents = '---\n'.join(parts)
        with open(filename, 'w') as f:
            f.write(contents)

    outpath = os.path.join(basepath, f'{cityid}.bin')

    if data.get('script') is not None:
        print('Applying post download script')
        subprocess.run(
            [data['script']],
            stdout=sys.stdout, stderr=sys.stderr, cwd=basepath
        )

    if new_files or not os.path.exists(outpath):
        print('Calling mapnificent generator')
        subprocess.run(["./mapnificent_generator", "-d", data_path,
                        "-o", outpath, '-v'],
                       stdout=sys.stdout, stderr=sys.stderr)


if __name__ == '__main__':
    main(sys.argv[1])
import datetime
import re
from typing import Any, Dict, List, Mapping
from unittest import mock

import orjson
from django.conf import settings

from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
    do_add_deactivated_redirect,
    do_change_plan_type,
    do_change_realm_subdomain,
    do_create_realm,
    do_deactivate_realm,
    do_deactivate_stream,
    do_scrub_realm,
    do_send_realm_reactivation_email,
    do_set_realm_property,
)
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.send_email import send_future_email
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import reset_emails_in_zulip_realm, tornado_redirected_to_list
from zerver.models import (
    Attachment,
    CustomProfileField,
    Message,
    Realm,
    ScheduledEmail,
    UserMessage,
    UserProfile,
    get_realm,
    get_stream,
    get_user_profile_by_email,
    get_user_profile_by_id,
)


class RealmTest(ZulipTestCase):
    # Tests for realm-level settings, caching behavior, and the
    # /json/realm API endpoints.

    def assert_user_profile_cache_gets_new_name(self, user_profile: UserProfile,
                                                new_realm_name: str) -> None:
        # Verify a (possibly cached) user profile sees the renamed realm.
        self.assertEqual(user_profile.realm.name, new_realm_name)

    def test_realm_creation_ensures_internal_realms(self) -> None:
        # When the server is not yet initialized, creating a realm must
        # first create the internal realm (and log that it is doing so).
        with mock.patch("zerver.lib.actions.server_initialized", return_value=False):
            with mock.patch("zerver.lib.actions.create_internal_realm") as mock_create_internal, \
                    self.assertLogs(level='INFO') as info_logs:
                do_create_realm("testrealm", "Test Realm")
                mock_create_internal.assert_called_once()
                self.assertEqual(info_logs.output, [
                    'INFO:root:Server not yet initialized. Creating the internal realm first.'
]) def test_do_set_realm_name_caching(self) -> None: """The main complicated thing about setting realm names is fighting the cache, and we start by populating the cache for Hamlet, and we end by checking the cache to ensure that the new value is there.""" self.example_user('hamlet') realm = get_realm('zulip') new_name = 'Zed You Elle Eye Pea' do_set_realm_property(realm, 'name', new_name) self.assertEqual(get_realm(realm.string_id).name, new_name) self.assert_user_profile_cache_gets_new_name(self.example_user('hamlet'), new_name) def test_update_realm_name_events(self) -> None: realm = get_realm('zulip') new_name = 'Puliz' events: List[Mapping[str, Any]] = [] with tornado_redirected_to_list(events): do_set_realm_property(realm, 'name', new_name) event = events[0]['event'] self.assertEqual(event, dict( type='realm', op='update', property='name', value=new_name, )) def test_update_realm_description_events(self) -> None: realm = get_realm('zulip') new_description = 'zulip dev group' events: List[Mapping[str, Any]] = [] with tornado_redirected_to_list(events): do_set_realm_property(realm, 'description', new_description) event = events[0]['event'] self.assertEqual(event, dict( type='realm', op='update', property='description', value=new_description, )) def test_update_realm_description(self) -> None: self.login('iago') new_description = 'zulip dev group' data = dict(description=orjson.dumps(new_description).decode()) events: List[Mapping[str, Any]] = [] with tornado_redirected_to_list(events): result = self.client_patch('/json/realm', data) self.assert_json_success(result) realm = get_realm('zulip') self.assertEqual(realm.description, new_description) event = events[0]['event'] self.assertEqual(event, dict( type='realm', op='update', property='description', value=new_description, )) def test_realm_description_length(self) -> None: new_description = 'A' * 1001 data = dict(description=orjson.dumps(new_description).decode()) # create an admin user self.login('iago') result 
= self.client_patch('/json/realm', data) self.assert_json_error(result, 'Organization description is too long.') realm = get_realm('zulip') self.assertNotEqual(realm.description, new_description) def test_realm_name_length(self) -> None: new_name = 'A' * (Realm.MAX_REALM_NAME_LENGTH + 1) data = dict(name=orjson.dumps(new_name).decode()) # create an admin user self.login('iago') result = self.client_patch('/json/realm', data) self.assert_json_error(result, 'Organization name is too long.') realm = get_realm('zulip') self.assertNotEqual(realm.name, new_name) def test_admin_restrictions_for_changing_realm_name(self) -> None: new_name = 'Mice will play while the cat is away' self.login('othello') req = dict(name=orjson.dumps(new_name).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Must be an organization administrator') def test_unauthorized_name_change(self) -> None: data = {'full_name': 'Sir Hamlet'} user_profile = self.example_user('hamlet') self.login_user(user_profile) do_set_realm_property(user_profile.realm, 'name_changes_disabled', True) url = '/json/settings' result = self.client_patch(url, data) self.assertEqual(result.status_code, 200) # Since the setting fails silently, no message is returned self.assert_in_response("", result) # Realm admins can change their name even setting is disabled. data = {'full_name': 'New Iago'} self.login('iago') url = '/json/settings' result = self.client_patch(url, data) self.assert_in_success_response(['"full_name":"New Iago"'], result) def test_do_deactivate_realm_clears_user_realm_cache(self) -> None: """The main complicated thing about deactivating realm names is updating the cache, and we start by populating the cache for Hamlet, and we end by checking the cache to ensure that his realm appears to be deactivated. 
You can make this test fail by disabling cache.flush_realm().""" self.example_user('hamlet') realm = get_realm('zulip') do_deactivate_realm(realm) user = self.example_user('hamlet') self.assertTrue(user.realm.deactivated) def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None: """The main complicated thing about changing realm subdomains is updating the cache, and we start by populating the cache for Hamlet, and we end by checking the cache to ensure that his realm appears to be deactivated. You can make this test fail by disabling cache.flush_realm().""" user = get_user_profile_by_email('hamlet@zulip.com') realm = get_realm('zulip') do_change_realm_subdomain(realm, "newzulip") user = get_user_profile_by_email('hamlet@zulip.com') self.assertEqual(user.realm.string_id, "newzulip") # This doesn't use a cache right now, but may later. with self.assertRaises(Realm.DoesNotExist): get_realm("zulip") def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None: user = self.example_user('hamlet') send_future_email('zerver/emails/followup_day1', user.realm, to_user_ids=[user.id], delay=datetime.timedelta(hours=1)) self.assertEqual(ScheduledEmail.objects.count(), 1) do_deactivate_realm(user.realm) self.assertEqual(ScheduledEmail.objects.count(), 0) def test_do_change_realm_description_clears_cached_descriptions(self) -> None: realm = get_realm('zulip') rendered_description = get_realm_rendered_description(realm) text_description = get_realm_text_description(realm) realm.description = 'New Description' realm.save(update_fields=['description']) new_rendered_description = get_realm_rendered_description(realm) self.assertNotEqual(rendered_description, new_rendered_description) self.assertIn(realm.description, new_rendered_description) new_text_description = get_realm_text_description(realm) self.assertNotEqual(text_description, new_text_description) self.assertEqual(realm.description, new_text_description) def 
test_do_deactivate_realm_on_deactivated_realm(self) -> None: """Ensure early exit is working in realm deactivation""" realm = get_realm('zulip') self.assertFalse(realm.deactivated) do_deactivate_realm(realm) self.assertTrue(realm.deactivated) do_deactivate_realm(realm) self.assertTrue(realm.deactivated) def test_do_set_deactivated_redirect_on_deactivated_realm(self) -> None: """Ensure that the redirect url is working when deactivating realm""" realm = get_realm('zulip') redirect_url = 'new_server.zulip.com' do_deactivate_realm(realm) self.assertTrue(realm.deactivated) do_add_deactivated_redirect(realm, redirect_url) self.assertEqual(realm.deactivated_redirect, redirect_url) new_redirect_url = 'test.zulip.com' do_add_deactivated_redirect(realm, new_redirect_url) self.assertEqual(realm.deactivated_redirect, new_redirect_url) self.assertNotEqual(realm.deactivated_redirect, redirect_url) def test_realm_reactivation_link(self) -> None: realm = get_realm('zulip') do_deactivate_realm(realm) self.assertTrue(realm.deactivated) confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION) response = self.client_get(confirmation_url) self.assert_in_success_response(['Your organization has been successfully reactivated'], response) realm = get_realm('zulip') self.assertFalse(realm.deactivated) def test_realm_reactivation_confirmation_object(self) -> None: realm = get_realm('zulip') do_deactivate_realm(realm) self.assertTrue(realm.deactivated) create_confirmation_link(realm, Confirmation.REALM_REACTIVATION) confirmation = Confirmation.objects.last() self.assertEqual(confirmation.content_object, realm) self.assertEqual(confirmation.realm, realm) def test_do_send_realm_reactivation_email(self) -> None: realm = get_realm('zulip') do_send_realm_reactivation_email(realm) from django.core.mail import outbox self.assertEqual(len(outbox), 1) self.assertRegex( outbox[0].from_email, fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z", ) 
self.assertIn('Reactivate your Zulip organization', outbox[0].subject) self.assertIn('Dear former administrators', outbox[0].body) admins = realm.get_human_admin_users() confirmation_url = self.get_confirmation_url_from_outbox(admins[0].delivery_email) response = self.client_get(confirmation_url) self.assert_in_success_response(['Your organization has been successfully reactivated'], response) realm = get_realm('zulip') self.assertFalse(realm.deactivated) def test_realm_reactivation_with_random_link(self) -> None: random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b" response = self.client_get(random_link) self.assert_in_success_response(['The organization reactivation link has expired or is not valid.'], response) def test_change_notifications_stream(self) -> None: # We need an admin user. self.login('iago') disabled_notif_stream_id = -1 req = dict(notifications_stream_id = orjson.dumps(disabled_notif_stream_id).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) realm = get_realm('zulip') self.assertEqual(realm.notifications_stream, None) new_notif_stream_id = 4 req = dict(notifications_stream_id = orjson.dumps(new_notif_stream_id).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) realm = get_realm('zulip') assert realm.notifications_stream is not None self.assertEqual(realm.notifications_stream.id, new_notif_stream_id) invalid_notif_stream_id = 1234 req = dict(notifications_stream_id = orjson.dumps(invalid_notif_stream_id).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid stream id') realm = get_realm('zulip') assert realm.notifications_stream is not None self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id) def test_get_default_notifications_stream(self) -> None: realm = get_realm("zulip") verona = get_stream("verona", realm) realm.notifications_stream_id = verona.id 
realm.save(update_fields=["notifications_stream"]) notifications_stream = realm.get_notifications_stream() assert notifications_stream is not None self.assertEqual(notifications_stream.id, verona.id) do_deactivate_stream(notifications_stream) self.assertIsNone(realm.get_notifications_stream()) def test_change_signup_notifications_stream(self) -> None: # We need an admin user. self.login('iago') disabled_signup_notifications_stream_id = -1 req = dict(signup_notifications_stream_id = orjson.dumps(disabled_signup_notifications_stream_id).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) realm = get_realm('zulip') self.assertEqual(realm.signup_notifications_stream, None) new_signup_notifications_stream_id = 4 req = dict(signup_notifications_stream_id = orjson.dumps(new_signup_notifications_stream_id).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) realm = get_realm('zulip') assert realm.signup_notifications_stream is not None self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id) invalid_signup_notifications_stream_id = 1234 req = dict(signup_notifications_stream_id = orjson.dumps(invalid_signup_notifications_stream_id).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid stream id') realm = get_realm('zulip') assert realm.signup_notifications_stream is not None self.assertNotEqual(realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id) def test_get_default_signup_notifications_stream(self) -> None: realm = get_realm("zulip") verona = get_stream("verona", realm) realm.signup_notifications_stream = verona realm.save(update_fields=["signup_notifications_stream"]) signup_notifications_stream = realm.get_signup_notifications_stream() assert signup_notifications_stream is not None self.assertEqual(signup_notifications_stream, verona) do_deactivate_stream(signup_notifications_stream) 
self.assertIsNone(realm.get_signup_notifications_stream()) def test_change_realm_default_language(self) -> None: new_lang = "de" realm = get_realm('zulip') self.assertNotEqual(realm.default_language, new_lang) # we need an admin user. self.login('iago') req = dict(default_language=orjson.dumps(new_lang).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) realm = get_realm('zulip') self.assertEqual(realm.default_language, new_lang) # Test to make sure that when invalid languages are passed # as the default realm language, correct validation error is # raised and the invalid language is not saved in db invalid_lang = "invalid_lang" req = dict(default_language=orjson.dumps(invalid_lang).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, f"Invalid language '{invalid_lang}'") realm = get_realm('zulip') self.assertNotEqual(realm.default_language, invalid_lang) def test_deactivate_realm_by_owner(self) -> None: self.login('desdemona') realm = get_realm('zulip') self.assertFalse(realm.deactivated) result = self.client_post('/json/realm/deactivate') self.assert_json_success(result) realm = get_realm('zulip') self.assertTrue(realm.deactivated) def test_deactivate_realm_by_non_owner(self) -> None: self.login('iago') realm = get_realm('zulip') self.assertFalse(realm.deactivated) result = self.client_post('/json/realm/deactivate') self.assert_json_error(result, "Must be an organization owner") realm = get_realm('zulip') self.assertFalse(realm.deactivated) def test_change_bot_creation_policy(self) -> None: # We need an admin user. 
self.login('iago') req = dict(bot_creation_policy = orjson.dumps(Realm.BOT_CREATION_LIMIT_GENERIC_BOTS).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) invalid_add_bot_permission = 4 req = dict(bot_creation_policy = orjson.dumps(invalid_add_bot_permission).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid bot_creation_policy') def test_change_email_address_visibility(self) -> None: # We need an admin user. user_profile = self.example_user("iago") hamlet = self.example_user("hamlet") cordelia = self.example_user("cordelia") self.login_user(user_profile) invalid_value = 12 req = dict(email_address_visibility = orjson.dumps(invalid_value).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid email_address_visibility') reset_emails_in_zulip_realm() realm = get_realm("zulip") req = dict(email_address_visibility = orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) realm = get_realm("zulip") self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS) edited_user_profile = get_user_profile_by_id(user_profile.id) self.assertEqual(edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver") # Check normal user cannot access email result = self.api_get(cordelia, f"/api/v1/users/{hamlet.id}") self.assert_json_success(result) self.assertEqual(result.json()['user']['email'], f'user{hamlet.id}@zulip.testserver') self.assertEqual(result.json()['user'].get('delivery_email'), None) # Check administrator gets delivery_email with EMAIL_ADDRESS_VISIBILITY_ADMINS result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}") self.assert_json_success(result) self.assertEqual(result.json()['user']['email'], f'user{hamlet.id}@zulip.testserver') self.assertEqual(result.json()['user'].get('delivery_email'), 
hamlet.delivery_email) req = dict(email_address_visibility = orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) realm = get_realm("zulip") self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY) edited_user_profile = get_user_profile_by_id(user_profile.id) self.assertEqual(edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver") # Check even administrator doesn't get delivery_email with # EMAIL_ADDRESS_VISIBILITY_NOBODY result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}") self.assert_json_success(result) self.assertEqual(result.json()['user']['email'], f'user{hamlet.id}@zulip.testserver') self.assertEqual(result.json()['user'].get('delivery_email'), None) def test_change_stream_creation_policy(self) -> None: # We need an admin user. self.login('iago') req = dict(create_stream_policy = orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) invalid_value = 10 req = dict(create_stream_policy = orjson.dumps(invalid_value).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid create_stream_policy') def test_change_invite_to_stream_policy(self) -> None: # We need an admin user. self.login('iago') req = dict(invite_to_stream_policy = orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) invalid_value = 10 req = dict(invite_to_stream_policy = orjson.dumps(invalid_value).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid invite_to_stream_policy') def test_user_group_edit_policy(self) -> None: # We need an admin user. 
self.login('iago') req = dict(user_group_edit_policy = orjson.dumps(Realm.USER_GROUP_EDIT_POLICY_ADMINS).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) invalid_value = 10 req = dict(user_group_edit_policy = orjson.dumps(invalid_value).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid user_group_edit_policy') def test_private_message_policy(self) -> None: # We need an admin user. self.login('iago') req = dict(private_message_policy = orjson.dumps(Realm.PRIVATE_MESSAGE_POLICY_DISABLED).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) invalid_value = 10 req = dict(private_message_policy = orjson.dumps(invalid_value).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid private_message_policy') def test_change_wildcard_mention_policy(self) -> None: # We need an admin user. self.login('iago') req = dict(wildcard_mention_policy = orjson.dumps(Realm.WILDCARD_MENTION_POLICY_EVERYONE).decode()) result = self.client_patch('/json/realm', req) self.assert_json_success(result) invalid_value = 10 req = dict(wildcard_mention_policy = orjson.dumps(invalid_value).decode()) result = self.client_patch('/json/realm', req) self.assert_json_error(result, 'Invalid wildcard_mention_policy') def test_invalid_integer_attribute_values(self) -> None: integer_values = [key for key, value in Realm.property_types.items() if value is int] invalid_values = dict( bot_creation_policy=10, create_stream_policy=10, invite_to_stream_policy=10, email_address_visibility=10, message_retention_days=10, video_chat_provider=10, waiting_period_threshold=-10, digest_weekday=10, user_group_edit_policy=10, private_message_policy=10, message_content_delete_limit_seconds=-10, wildcard_mention_policy=10, ) # We need an admin user. 
self.login('iago') for name in integer_values: invalid_value = invalid_values.get(name) if invalid_value is None: raise AssertionError(f'No test created for {name}') self.do_test_invalid_integer_attribute_value(name, invalid_value) def do_test_invalid_integer_attribute_value(self, val_name: str, invalid_val: int) -> None: possible_messages = { f"Invalid {val_name}", f"Bad value for '{val_name}'", f"Bad value for '{val_name}': {invalid_val}", f"Invalid {val_name} {invalid_val}", } req = {val_name: invalid_val} result = self.client_patch('/json/realm', req) msg = self.get_json_error(result) self.assertTrue(msg in possible_messages) def test_change_video_chat_provider(self) -> None: self.assertEqual(get_realm('zulip').video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id']) self.login('iago') invalid_video_chat_provider_value = 10 req = {"video_chat_provider": orjson.dumps(invalid_video_chat_provider_value).decode()} result = self.client_patch('/json/realm', req) self.assert_json_error(result, ("Invalid video_chat_provider {}").format(invalid_video_chat_provider_value)) req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['disabled']['id']).decode()} result = self.client_patch('/json/realm', req) self.assert_json_success(result) self.assertEqual(get_realm('zulip').video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS['disabled']['id']) req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id']).decode()} result = self.client_patch('/json/realm', req) self.assert_json_success(result) self.assertEqual(get_realm('zulip').video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id']) req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['big_blue_button']['id']).decode()} result = self.client_patch('/json/realm', req) self.assert_json_success(result) self.assertEqual(get_realm('zulip').video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS['big_blue_button']['id']) req = {"video_chat_provider": 
orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['zoom']['id']).decode()} result = self.client_patch('/json/realm', req) self.assert_json_success(result) def test_initial_plan_type(self) -> None: with self.settings(BILLING_ENABLED=True): self.assertEqual(do_create_realm('hosted', 'hosted').plan_type, Realm.LIMITED) self.assertEqual(get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX) self.assertEqual(get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED) self.assertEqual(get_realm("hosted").upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED) with self.settings(BILLING_ENABLED=False): self.assertEqual(do_create_realm('onpremise', 'onpremise').plan_type, Realm.SELF_HOSTED) self.assertEqual(get_realm('onpremise').max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX) self.assertEqual(get_realm('onpremise').message_visibility_limit, None) self.assertEqual(get_realm("onpremise").upload_quota_gb, None) def test_change_plan_type(self) -> None: realm = get_realm('zulip') self.assertEqual(realm.plan_type, Realm.SELF_HOSTED) self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX) self.assertEqual(realm.message_visibility_limit, None) self.assertEqual(realm.upload_quota_gb, None) do_change_plan_type(realm, Realm.STANDARD) realm = get_realm('zulip') self.assertEqual(realm.plan_type, Realm.STANDARD) self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX) self.assertEqual(realm.message_visibility_limit, None) self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD) do_change_plan_type(realm, Realm.LIMITED) realm = get_realm('zulip') self.assertEqual(realm.plan_type, Realm.LIMITED) self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX) self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED) self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED) do_change_plan_type(realm, Realm.STANDARD_FREE) realm = get_realm('zulip') 
        # NOTE(review): tail of a plan-type test whose definition begins before
        # this chunk; it verifies STANDARD_FREE limits, then downgrades through
        # LIMITED to SELF_HOSTED and checks the self-hosted defaults.
        self.assertEqual(realm.plan_type, Realm.STANDARD_FREE)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)

        do_change_plan_type(realm, Realm.LIMITED)
        do_change_plan_type(realm, Realm.SELF_HOSTED)
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, None)

    def test_message_retention_days(self) -> None:
        """Validate /json/realm handling of message_retention_days.

        Covers: permission (owner-only), rejection of non-positive and
        non-numeric values, acceptance of 'forever' and positive ints, and
        plan-type gating (LIMITED plans may not change retention).
        """
        self.login('iago')
        realm = get_realm('zulip')
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch('/json/realm', req)
        # Iago is an admin but not an owner, so the request must be rejected.
        self.assert_json_error(result, "Must be an organization owner")

        self.login('desdemona')
        req = dict(message_retention_days=orjson.dumps(0).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': 0")

        req = dict(message_retention_days=orjson.dumps(-10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(
            result, "Bad value for 'message_retention_days': -10")

        req = dict(message_retention_days=orjson.dumps('invalid').decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': invalid")

        req = dict(message_retention_days=orjson.dumps(-1).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': -1")

        # 'forever' is the explicit sentinel for unlimited retention.
        req = dict(message_retention_days=orjson.dumps('forever').decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)

        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)

        # Retention is a paid feature: LIMITED plans are rejected...
        do_change_plan_type(realm, Realm.LIMITED)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(
            result, "Available on Zulip Standard. Upgrade to access.")

        # ...while STANDARD plans may set it.
        do_change_plan_type(realm, Realm.STANDARD)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)


class RealmAPITest(ZulipTestCase):
    """Exercises the PATCH /json/realm endpoint for every realm property."""

    def setUp(self) -> None:
        super().setUp()
        # Desdemona is an organization owner, so she may change any setting.
        self.login('desdemona')

    def set_up_db(self, attr: str, value: Any) -> None:
        # Seed the realm attribute directly in the database, bypassing the API,
        # so the subsequent API update is guaranteed to be a real change.
        realm = get_realm('zulip')
        setattr(realm, attr, value)
        realm.save(update_fields=[attr])

    def update_with_api(self, name: str, value: int) -> Realm:
        result = self.client_patch('/json/realm', {name: orjson.dumps(value).decode()})
        self.assert_json_success(result)
        return get_realm('zulip')  # refresh data

    def update_with_api_multiple_value(self, data_dict: Dict[str, Any]) -> Realm:
        # Variant of update_with_api for settings whose payload is already a
        # pre-encoded dict (e.g. video_chat_provider).
        result = self.client_patch('/json/realm', data_dict)
        self.assert_json_success(result)
        return get_realm('zulip')

    def do_test_realm_update_api(self, name: str) -> None:
        """Test updating realm properties.

        If new realm properties have been added to the Realm model but the
        test_values dict below has not been updated, this will raise an
        assertion error.
        """

        bool_tests: List[bool] = [False, True]
        test_values: Dict[str, Any] = dict(
            default_language=['de', 'en'],
            default_code_block_language=['javascript', ''],
            description=['Realm description', 'New description'],
            digest_weekday=[0, 1, 2],
            message_retention_days=[10, 20],
            name=['Zulip', 'New Name'],
            waiting_period_threshold=[10, 20],
            create_stream_policy=[Realm.POLICY_ADMINS_ONLY,
                                  Realm.POLICY_MEMBERS_ONLY,
                                  Realm.POLICY_FULL_MEMBERS_ONLY],
            user_group_edit_policy=[Realm.USER_GROUP_EDIT_POLICY_ADMINS,
                                    Realm.USER_GROUP_EDIT_POLICY_MEMBERS],
            private_message_policy=[Realm.PRIVATE_MESSAGE_POLICY_UNLIMITED,
                                    Realm.PRIVATE_MESSAGE_POLICY_DISABLED],
            invite_to_stream_policy=[Realm.POLICY_ADMINS_ONLY,
                                     Realm.POLICY_MEMBERS_ONLY,
                                     Realm.POLICY_FULL_MEMBERS_ONLY],
            wildcard_mention_policy=[Realm.WILDCARD_MENTION_POLICY_EVERYONE,
                                     Realm.WILDCARD_MENTION_POLICY_MEMBERS,
                                     Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS,
                                     Realm.WILDCARD_MENTION_POLICY_STREAM_ADMINS,
                                     Realm.WILDCARD_MENTION_POLICY_ADMINS,
                                     Realm.WILDCARD_MENTION_POLICY_NOBODY],
            bot_creation_policy=[1, 2],
            email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
                                      Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
                                      Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY],
            video_chat_provider=[
                dict(
                    video_chat_provider=orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id']).decode(),
                ),
            ],
            message_content_delete_limit_seconds=[1000, 1100, 1200]
        )

        vals = test_values.get(name)
        # Boolean properties all share the same generic [False, True] cycle.
        if Realm.property_types[name] is bool:
            vals = bool_tests

        if vals is None:
            raise AssertionError(f'No test created for {name}')

        if name == 'video_chat_provider':
            # This property uses the pre-encoded dict payload form.
            self.set_up_db(name, vals[0][name])
            realm = self.update_with_api_multiple_value(vals[0])
            self.assertEqual(getattr(realm, name), orjson.loads(vals[0][name]))
        else:
            # Flip the value away from the seeded one and back again, checking
            # the database round-trip after each API call.
            self.set_up_db(name, vals[0])
            realm = self.update_with_api(name, vals[1])
            self.assertEqual(getattr(realm, name), vals[1])
            realm = self.update_with_api(name, vals[0])
            self.assertEqual(getattr(realm, name), vals[0])

    def test_update_realm_properties(self) -> None:
        # Drive do_test_realm_update_api over every declared realm property.
        for prop in Realm.property_types:
            with self.subTest(property=prop):
                self.do_test_realm_update_api(prop)

    def test_update_realm_allow_message_editing(self) -> None:
        """Tests updating the realm property 'allow_message_editing'."""
        self.set_up_db('allow_message_editing', False)
        self.set_up_db('message_content_edit_limit_seconds', 0)
        self.set_up_db('allow_community_topic_editing', False)
        realm = self.update_with_api('allow_message_editing', True)
        realm = self.update_with_api('message_content_edit_limit_seconds', 100)
        realm = self.update_with_api('allow_community_topic_editing', True)
        self.assertEqual(realm.allow_message_editing, True)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        # Each property can be toggled independently of the other two.
        realm = self.update_with_api('allow_message_editing', False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api('message_content_edit_limit_seconds', 200)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api('allow_community_topic_editing', False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, False)

    def test_update_realm_allow_message_deleting(self) -> None:
        """Tests updating the realm property 'allow_message_deleting'."""
        self.set_up_db('allow_message_deleting', True)
        self.set_up_db('message_content_delete_limit_seconds', 0)
        realm = self.update_with_api('allow_message_deleting', False)
        self.assertEqual(realm.allow_message_deleting, False)
        self.assertEqual(realm.message_content_delete_limit_seconds, 0)
        realm = self.update_with_api('allow_message_deleting', True)
        realm = self.update_with_api('message_content_delete_limit_seconds', 100)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 100)
        realm = self.update_with_api('message_content_delete_limit_seconds', 600)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 600)


class ScrubRealmTest(ZulipTestCase):
    """Verifies do_scrub_realm wipes one realm without touching others."""

    def test_scrub_realm(self) -> None:
        zulip = get_realm("zulip")
        lear = get_realm("lear")

        iago = self.example_user("iago")
        othello = self.example_user("othello")

        cordelia = self.lear_user("cordelia")
        king = self.lear_user("king")

        create_stream_if_needed(lear, "Shakespeare")

        self.subscribe(cordelia, "Shakespeare")
        self.subscribe(king, "Shakespeare")

        # Start from a clean slate so the message counts below are exact.
        Message.objects.all().delete()
        UserMessage.objects.all().delete()

        for i in range(5):
            self.send_stream_message(iago, "Scotland")
            self.send_stream_message(othello, "Scotland")
            self.send_stream_message(cordelia, "Shakespeare")
            self.send_stream_message(king, "Shakespeare")

        Attachment.objects.filter(realm=zulip).delete()
        Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt", size=512)
        Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt", size=512)

        Attachment.objects.filter(realm=lear).delete()
        Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt", size=512)
        Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt", size=512)

        CustomProfileField.objects.create(realm=lear)

        # Sanity-check the fixtures before scrubbing.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)

        self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)

        with self.assertLogs(level="WARNING"):
            do_scrub_realm(zulip)

        # Zulip realm data is gone; the lear realm is untouched.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)

        self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
        self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)

        self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)

        # Scrubbed users get randomized names/emails; other realms keep theirs.
        zulip_users = UserProfile.objects.filter(realm=zulip)
        for user in zulip_users:
            self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))

        lear_users = UserProfile.objects.filter(realm=lear)
        for user in lear_users:
            self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
"""Integration tests for Kinto's experimental collection schema validation.

Fix: ``test_attribute_is_none_if_no_schema_defined`` previously asserted
``resp.json['data'].get('')`` — ``.get('')`` is trivially ``None`` for any
response, so the test was vacuous.  It now reads ``.get('schema')``, which
is the attribute the test name claims to check.
"""
from kinto.core.testing import unittest

from .support import BaseWebTest


BUCKET_URL = '/buckets/blog'
COLLECTION_URL = '/buckets/blog/collections/articles'
RECORDS_URL = '/buckets/blog/collections/articles/records'


SCHEMA = {
    "title": "Blog post schema",
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "body": {"type": "string"},
    },
    "required": ["title"]
}

VALID_RECORD = {'title': 'About us', 'body': '<h1>About</h1>'}


class DeactivatedSchemaTest(BaseWebTest, unittest.TestCase):
    """Behavior when the schema-validation feature flag is OFF (default)."""

    def test_schema_should_be_json_schema(self):
        # Even with validation off, a structurally invalid schema is rejected.
        newschema = SCHEMA.copy()
        newschema['type'] = 'Washmachine'
        self.app.put_json(BUCKET_URL, headers=self.headers)
        self.app.put(COLLECTION_URL, headers=self.headers)
        resp = self.app.put_json(COLLECTION_URL,
                                 {'data': {'schema': newschema}},
                                 headers=self.headers,
                                 status=400)
        error_msg = "'Washmachine' is not valid under any of the given schemas"
        self.assertIn(error_msg, resp.json['message'])

    def test_records_are_not_invalid_if_do_not_match_schema(self):
        # With the feature off, records violating the schema are still accepted.
        self.app.put_json(BUCKET_URL, headers=self.headers)
        self.app.put(COLLECTION_URL, headers=self.headers)
        resp = self.app.put_json(COLLECTION_URL,
                                 {'data': {'schema': SCHEMA}},
                                 headers=self.headers)
        self.collection = resp.json['data']
        self.app.post_json(RECORDS_URL,
                           {'data': {'body': '<h1>Without title</h1>'}},
                           headers=self.headers,
                           status=201)


class BaseWebTestWithSchema(BaseWebTest):
    """Base fixture that enables the schema-validation feature flag."""

    def get_app_settings(self, extras=None):
        settings = super(BaseWebTestWithSchema, self).get_app_settings(extras)
        settings['experimental_collection_schema_validation'] = 'True'
        return settings

    def setUp(self):
        super(BaseWebTestWithSchema, self).setUp()
        self.app.put_json(BUCKET_URL, headers=self.headers)
        self.app.put_json(COLLECTION_URL, headers=self.headers)


class MissingSchemaTest(BaseWebTestWithSchema, unittest.TestCase):
    """A collection without a schema accepts anything."""

    def test_attribute_is_none_if_no_schema_defined(self):
        resp = self.app.get(COLLECTION_URL, headers=self.headers)
        # BUG FIX: was .get('') which is always None; check the real attribute.
        self.assertIsNone(resp.json['data'].get('schema'))

    def test_accepts_any_kind_of_record(self):
        record = {'title': 'Troll'}
        self.app.post_json(RECORDS_URL,
                           {'data': record},
                           headers=self.headers,
                           status=201)
        record = {'author': {'age': 32, 'status': 'captain'}}
        self.app.post_json(RECORDS_URL,
                           {'data': record},
                           headers=self.headers,
                           status=201)


class InvalidSchemaTest(BaseWebTestWithSchema, unittest.TestCase):
    def test_schema_should_be_json_schema(self):
        newschema = SCHEMA.copy()
        newschema['type'] = 'Washmachine'
        resp = self.app.put_json(COLLECTION_URL,
                                 {'data': {'schema': newschema}},
                                 headers=self.headers,
                                 status=400)
        error_msg = "'Washmachine' is not valid under any of the given schemas"
        self.assertIn(error_msg, resp.json['message'])


class RecordsValidationTest(BaseWebTestWithSchema, unittest.TestCase):
    """Records are validated against the collection schema on every verb."""

    def setUp(self):
        super(RecordsValidationTest, self).setUp()
        resp = self.app.put_json(COLLECTION_URL,
                                 {'data': {'schema': SCHEMA}},
                                 headers=self.headers)
        self.collection = resp.json['data']

    def test_empty_record_can_be_validated(self):
        # 'title' is required, so an empty record is a 400.
        self.app.post_json(RECORDS_URL,
                           {'data': {}},
                           headers=self.headers,
                           status=400)

    def test_records_are_valid_if_match_schema(self):
        self.app.post_json(RECORDS_URL,
                           {'data': VALID_RECORD},
                           headers=self.headers,
                           status=201)

    def test_records_are_invalid_if_do_not_match_schema(self):
        self.app.post_json(RECORDS_URL,
                           {'data': {'body': '<h1>Without title</h1>'}},
                           headers=self.headers,
                           status=400)

    def test_records_are_validated_on_patch(self):
        resp = self.app.post_json(RECORDS_URL,
                                  {'data': VALID_RECORD},
                                  headers=self.headers,
                                  status=201)
        record_id = resp.json['data']['id']
        self.app.patch_json('%s/%s' % (RECORDS_URL, record_id),
                            {'data': {'title': 3.14}},
                            headers=self.headers,
                            status=400)

    def test_records_are_validated_on_put(self):
        resp = self.app.post_json(RECORDS_URL,
                                  {'data': VALID_RECORD},
                                  headers=self.headers,
                                  status=201)
        record_id = resp.json['data']['id']
        self.app.put_json('%s/%s' % (RECORDS_URL, record_id),
                          {'data': {'body': '<h1>Without title</h1>'}},
                          headers=self.headers,
                          status=400)

    def test_validation_error_response_provides_details(self):
        resp = self.app.post_json(RECORDS_URL,
                                  {'data': {'body': '<h1>Without title</h1>'}},
                                  headers=self.headers,
                                  status=400)
        self.assertIn("'title' is a required property", resp.json['message'])
        self.assertEqual(resp.json['details'][0]['name'], 'title')

    def test_records_of_other_bucket_are_not_impacted(self):
        # Validation is scoped to the collection that declares the schema.
        self.app.put_json('/buckets/cms', headers=self.headers)
        self.app.put_json('/buckets/cms/collections/articles',
                          headers=self.headers)
        self.app.post_json('/buckets/cms/collections/articles/records',
                           {'data': {'body': '<h1>Without title</h1>'}},
                           headers=self.headers)

    def test_records_receive_the_schema_as_attribute(self):
        # Records are stamped with the collection's last_modified as 'schema'.
        resp = self.app.post_json(RECORDS_URL,
                                  {'data': VALID_RECORD},
                                  headers=self.headers,
                                  status=201)
        self.assertEqual(resp.json['data']['schema'],
                         self.collection['last_modified'])

    def test_records_can_filtered_by_schema_version(self):
        self.app.post_json(RECORDS_URL,
                           {'data': VALID_RECORD},
                           headers=self.headers)
        resp = self.app.put_json(COLLECTION_URL,
                                 {'data': {'schema': SCHEMA}},
                                 headers=self.headers)
        schema_version = resp.json['data']['last_modified']
        self.app.post_json(RECORDS_URL,
                           {'data': VALID_RECORD},
                           headers=self.headers)
        # Only the record created after the schema bump matches min_schema.
        resp = self.app.get(RECORDS_URL + '?min_schema=%s' % schema_version,
                            headers=self.headers)
        self.assertEqual(len(resp.json['data']), 1)


class ExtraPropertiesValidationTest(BaseWebTestWithSchema, unittest.TestCase):
    """Schema with additionalProperties=False rejects unexpected fields."""

    def setUp(self):
        super(ExtraPropertiesValidationTest, self).setUp()
        schema = SCHEMA.copy()
        schema['additionalProperties'] = False
        resp = self.app.put_json(COLLECTION_URL,
                                 {'data': {'schema': schema}},
                                 headers=self.headers)
        self.collection = resp.json['data']

    def test_record_can_be_validated_on_post(self):
        self.app.post_json(RECORDS_URL,
                           {'data': VALID_RECORD},
                           headers=self.headers)

    def test_record_can_be_validated_on_put(self):
        record_id = '5443d83f-852a-481a-8e9d-5aa804b05b08'
        self.app.put_json('%s/%s' % (RECORDS_URL, record_id),
                          {'data': VALID_RECORD},
                          headers=self.headers)

    def test_records_are_validated_on_patch(self):
        record_id = '5443d83f-852a-481a-8e9d-5aa804b05b08'
        record_url = '%s/%s' % (RECORDS_URL, record_id)
        resp = self.app.put_json(record_url,
                                 {'data': VALID_RECORD},
                                 headers=self.headers)
        record = resp.json['data']
        # The server-added 'schema' stamp must not trip additionalProperties.
        assert 'schema' in record
        record['title'] = 'hey'
        self.app.patch_json(record_url,
                            {'data': record},
                            headers=self.headers)

    def test_additional_properties_are_rejected(self):
        record_id = '5443d83f-852a-481a-8e9d-5aa804b05b08'
        record = VALID_RECORD.copy()
        record['extra'] = 'blah!'
        resp = self.app.put_json('%s/%s' % (RECORDS_URL, record_id),
                                 {'data': record},
                                 headers=self.headers,
                                 status=400)
        assert "'extra' was unexpected)" in resp.json['message']
# Sublime Text plugin host: loads/unloads plugin modules, registers their
# command and event-listener classes, and dispatches editor events to them.
# Bare `except:` clauses are deliberate throughout — third-party plugin code
# must never be able to crash the host.
import imp
import importlib
import os
import sys
import time
import traceback
import zipfile

import sublime
import sublime_api

api_ready = False

# Global registries of plugin-provided classes/instances.
application_command_classes = []
window_command_classes = []
text_command_classes = []

view_event_listener_classes = []
view_event_listeners = {}  # view_id -> list of ViewEventListener instances

all_command_classes = [
    application_command_classes,
    window_command_classes,
    text_command_classes]

# Event name -> list of EventListener instances subscribed to that event.
all_callbacks = {
    'on_new': [], 'on_clone': [], 'on_load': [], 'on_pre_close': [],
    'on_close': [], 'on_pre_save': [], 'on_post_save': [], 'on_modified': [],
    'on_selection_modified': [], 'on_activated': [], 'on_deactivated': [],
    'on_query_context': [], 'on_query_completions': [], 'on_hover': [],
    'on_text_command': [], 'on_window_command': [],
    'on_post_text_command': [], 'on_post_window_command': [],
    'on_modified_async': [], 'on_selection_modified_async': [],
    'on_pre_save_async': [], 'on_post_save_async': [],
    'on_activated_async': [], 'on_deactivated_async': [],
    'on_new_async': [], 'on_load_async': [], 'on_clone_async': []}

# event name -> {module name -> Summary} of callback timings.
profile = {}


def unload_module(module):
    # Give the module a chance to clean up, then strip every class/instance it
    # registered out of the global registries.
    if "plugin_unloaded" in module.__dict__:
        module.plugin_unloaded()
    # Check unload_handler too, for backwards compat
    if "unload_handler" in module.__dict__:
        module.unload_handler()

    # Unload the old plugins
    if "plugins" in module.__dict__:
        # Iterate over a copy so removal is safe mid-loop.
        for view_id, listener_instances in view_event_listeners.items():
            for vel in listener_instances[:]:
                if vel.__class__ in module.plugins:
                    listener_instances.remove(vel)

        for p in module.plugins:
            for cmd_cls_list in all_command_classes:
                try:
                    cmd_cls_list.remove(p)
                except ValueError:
                    pass
            for c in all_callbacks.values():
                try:
                    c.remove(p)
                except ValueError:
                    pass
            try:
                view_event_listener_classes.remove(p)
            except ValueError:
                pass


def unload_plugin(modulename):
    # Unload the named module, if loaded, and drop it from sys.modules.
    print("unloading plugin", modulename)

    was_loaded = modulename in sys.modules
    if was_loaded:
        m = sys.modules[modulename]
        unload_module(m)
        del sys.modules[modulename]


def reload_plugin(modulename):
    # (Re)import a plugin module and register every Command / EventListener /
    # ViewEventListener subclass it defines.
    print("reloading plugin", modulename)

    if modulename in sys.modules:
        m = sys.modules[modulename]
        unload_module(m)
        m = imp.reload(m)
    else:
        m = importlib.import_module(modulename)

    module_plugins = []
    on_activated_targets = []
    module_view_event_listener_classes = []
    for type_name in dir(m):
        try:
            t = m.__dict__[type_name]
            # Only class objects have __bases__; AttributeError skips the rest.
            if t.__bases__:
                is_plugin = False
                if issubclass(t, ApplicationCommand):
                    application_command_classes.append(t)
                    is_plugin = True
                if issubclass(t, WindowCommand):
                    window_command_classes.append(t)
                    is_plugin = True
                if issubclass(t, TextCommand):
                    text_command_classes.append(t)
                    is_plugin = True

                if is_plugin:
                    module_plugins.append(t)

                if issubclass(t, EventListener):
                    # EventListeners are registered as singleton instances,
                    # hooked into each event they define a method for.
                    obj = t()
                    for p in all_callbacks.items():
                        if p[0] in dir(obj):
                            p[1].append(obj)

                    if "on_activated" in dir(obj):
                        on_activated_targets.append(obj)

                    module_plugins.append(obj)

                if issubclass(t, ViewEventListener):
                    # ViewEventListeners are registered as classes and
                    # instantiated per-view later.
                    view_event_listener_classes.append(t)
                    module_view_event_listener_classes.append(t)
                    module_plugins.append(t)

        except AttributeError:
            pass

    if len(module_plugins) > 0:
        # Remember what we registered so unload_module can undo it.
        m.plugins = module_plugins

    if api_ready:
        if "plugin_loaded" in m.__dict__:
            try:
                m.plugin_loaded()
            except:
                traceback.print_exc()

        # Synthesize any required on_activated calls
        for el in on_activated_targets:
            w = sublime.active_window()
            if w:
                v = w.active_view()
                if v:
                    try:
                        el.on_activated(v)
                    except:
                        traceback.print_exc()

        # Create any require ViewEventListener objects
        if len(module_view_event_listener_classes) > 0:
            for w in sublime.windows():
                for v in w.views():
                    create_view_event_listeners(
                        module_view_event_listener_classes, v)


def create_application_commands():
    # Instantiate every registered ApplicationCommand and hand the list to the
    # core via sublime_api (no return value).
    cmds = []
    for class_ in application_command_classes:
        cmds.append(class_())
    sublime_api.notify_application_commands(cmds)


def create_window_commands(window_id):
    # Instantiate every registered WindowCommand bound to the given window.
    window = sublime.Window(window_id)
    cmds = []
    for class_ in window_command_classes:
        cmds.append(class_(window))
    return cmds


def create_text_commands(view_id):
    # Instantiate every registered TextCommand bound to the given view.
    view = sublime.View(view_id)
    cmds = []
    for class_ in text_command_classes:
        cmds.append(class_(view))
    return cmds


def on_api_ready():
    # Called once by the core when the plugin API becomes usable: run each
    # module's plugin_loaded(), synthesize the initial on_activated, and
    # attach ViewEventListeners to already-open views.
    global api_ready
    api_ready = True

    for m in list(sys.modules.values()):
        if "plugin_loaded" in m.__dict__:
            try:
                m.plugin_loaded()
            except:
                traceback.print_exc()

    # Synthesize an on_activated call
    w = sublime.active_window()
    if w:
        view_id = sublime_api.window_active_view(w.window_id)
        if view_id != 0:
            try:
                on_activated(view_id)
            except:
                traceback.print_exc()

    # Create ViewEventListener instances
    if len(view_event_listener_classes) > 0:
        for w in sublime.windows():
            for v in w.views():
                attach_view(v)


def is_view_event_listener_applicable(cls, view):
    # A listener class applies to a view only if its own predicates say so.
    if not cls.is_applicable(view.settings()):
        return False

    if cls.applies_to_primary_view_only() and not view.is_primary():
        return False

    return True


def create_view_event_listeners(classes, view):
    # Instantiate each applicable listener class for this view.
    if len(classes) > 0:
        if view.view_id not in view_event_listeners:
            view_event_listeners[view.view_id] = []

        for c in classes:
            if is_view_event_listener_applicable(c, view):
                view_event_listeners[view.view_id].append(c(view))


def check_view_event_listeners(view):
    # Reconcile the view's listener instances with what should apply now
    # (applicability can change when the view's settings change).
    if len(view_event_listener_classes) > 0:
        if view.view_id not in view_event_listeners:
            view_event_listeners[view.view_id] = []

        listeners = view_event_listeners[view.view_id]

        for cls in view_event_listener_classes:
            found = False
            instance = None
            for l in listeners:
                if l.__class__ == cls:
                    found = True
                    instance = l
                    break

            want = is_view_event_listener_applicable(cls, view)

            if want and not found:
                listeners.append(cls(view))
            elif found and not want:
                listeners.remove(instance)


def attach_view(view):
    # Set up listeners for the view and re-check whenever its settings change.
    check_view_event_listeners(view)

    view.settings().add_on_change(
        "check_view_event_listeners",
        lambda: check_view_event_listeners(view))


check_all_view_event_listeners_scheduled = False


def check_all_view_event_listeners():
    # Deferred full reconciliation across every open view.
    global check_all_view_event_listeners_scheduled
    check_all_view_event_listeners_scheduled = False
    for w in sublime.windows():
        for v in w.views():
            check_view_event_listeners(v)


def detach_view(view):
    if view.view_id in view_event_listeners:
        del view_event_listeners[view.view_id]

    # A view has closed, which implies 'is_primary' may have changed, so see if
    # any of the ViewEventListener classes need to be created.
    # Call this in a timeout, as 'view' will still be reporting itself as a
    # primary at this stage
    global check_all_view_event_listeners_scheduled
    if not check_all_view_event_listeners_scheduled:
        check_all_view_event_listeners_scheduled = True
        sublime.set_timeout(check_all_view_event_listeners)


def event_listeners_for_view(view):
    # Listener instances attached to the view, or [] if none.
    if view.view_id in view_event_listeners:
        return view_event_listeners[view.view_id]
    else:
        return []


def find_view_event_listener(view, cls):
    # The view's listener instance of the given class, or None.
    if view.view_id in view_event_listeners:
        for vel in view_event_listeners[view.view_id]:
            if vel.__class__ == cls:
                return vel
    return None


def on_new(view_id):
    v = sublime.View(view_id)
    attach_view(v)
    for callback in all_callbacks['on_new']:
        try:
            callback.on_new(v)
        except:
            traceback.print_exc()


def on_new_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_new_async']:
        try:
            callback.on_new_async(v)
        except:
            traceback.print_exc()


def on_clone(view_id):
    v = sublime.View(view_id)
    attach_view(v)
    for callback in all_callbacks['on_clone']:
        try:
            callback.on_clone(v)
        except:
            traceback.print_exc()


def on_clone_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_clone_async']:
        try:
            callback.on_clone_async(v)
        except:
            traceback.print_exc()


class Summary(object):
    # Running max/sum/count of callback durations, for the `profile` table.
    def __init__(self):
        self.max = 0.0
        self.sum = 0.0
        self.count = 0

    def record(self, x):
        self.count += 1
        self.sum += x
        self.max = max(self.max, x)

    def __str__(self):
        if self.count > 1:
            return "{0:.3f}s total, mean: {1:.3f}s, max: {2:.3f}s".format(
                self.sum, self.sum / self.count, self.max)
        elif self.count == 1:
            return "{0:.3f}s total".format(self.sum)
        else:
            return "0s total"


def run_callback(event, callback, expr):
    # Run expr() (a zero-arg closure over the real callback), swallowing
    # exceptions, and record its wall-clock time under profile[event][module].
    t0 = time.time()

    try:
        expr()
    except:
        traceback.print_exc()

    elapsed = time.time() - t0

    if event not in profile:
        profile[event] = {}

    p = profile[event]

    name = callback.__module__
    if name not in p:
        p[name] = Summary()

    p[name].record(elapsed)


def run_view_listener_callback(view, name):
    # Dispatch to ViewEventListeners that define `name` directly on their
    # class (class __dict__ lookup deliberately ignores inherited stubs).
    for vel in event_listeners_for_view(view):
        if name in vel.__class__.__dict__:
            run_callback(name, vel, lambda: vel.__class__.__dict__[name](vel))


def run_async_view_listener_callback(view, name):
    # Async variant: no profiling, just exception isolation.
    for vel in event_listeners_for_view(view):
        if name in vel.__class__.__dict__:
            try:
                vel.__class__.__dict__[name](vel)
            except:
                traceback.print_exc()


def on_load(view_id):
    v = sublime.View(view_id)
    attach_view(v)
    for callback in all_callbacks['on_load']:
        run_callback('on_load', callback, lambda: callback.on_load(v))


def on_load_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_load_async']:
        try:
            callback.on_load_async(v)
        except:
            traceback.print_exc()


def on_pre_close(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_pre_close']:
        run_callback('on_pre_close', callback, lambda: callback.on_pre_close(v))


def on_close(view_id):
    v = sublime.View(view_id)

    detach_view(v)

    for callback in all_callbacks['on_close']:
        run_callback('on_close', callback, lambda: callback.on_close(v))


def on_pre_save(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_pre_save']:
        run_callback('on_pre_save', callback, lambda: callback.on_pre_save(v))


def on_pre_save_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_pre_save_async']:
        try:
            callback.on_pre_save_async(v)
        except:
            traceback.print_exc()


def on_post_save(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_post_save']:
        run_callback('on_post_save', callback, lambda: callback.on_post_save(v))


def on_post_save_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_post_save_async']:
        try:
            callback.on_post_save_async(v)
        except:
            traceback.print_exc()


def on_modified(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_modified']:
        run_callback('on_modified', callback, lambda: callback.on_modified(v))

    run_view_listener_callback(v, 'on_modified')


def on_modified_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_modified_async']:
        try:
            callback.on_modified_async(v)
        except:
            traceback.print_exc()

    run_async_view_listener_callback(v, 'on_modified_async')


def on_selection_modified(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_selection_modified']:
        run_callback('on_selection_modified', callback,
                     lambda: callback.on_selection_modified(v))

    run_view_listener_callback(v, 'on_selection_modified')


def on_selection_modified_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_selection_modified_async']:
        try:
            callback.on_selection_modified_async(v)
        except:
            traceback.print_exc()

    run_async_view_listener_callback(v, 'on_selection_modified_async')


def on_activated(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_activated']:
        run_callback('on_activated', callback, lambda: callback.on_activated(v))

    run_view_listener_callback(v, 'on_activated')


def on_activated_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_activated_async']:
        try:
            callback.on_activated_async(v)
        except:
            traceback.print_exc()

    run_async_view_listener_callback(v, 'on_activated_async')


def on_deactivated(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_deactivated']:
        run_callback('on_deactivated', callback,
                     lambda: callback.on_deactivated(v))

    run_view_listener_callback(v, 'on_deactivated')


def on_deactivated_async(view_id):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_deactivated_async']:
        try:
            callback.on_deactivated_async(v)
        except:
            traceback.print_exc()

    run_async_view_listener_callback(v, 'on_deactivated_async')


def on_query_context(view_id, key, operator, operand, match_all):
    # First listener to return a truthy value wins; default is False.
    v = sublime.View(view_id)
    for callback in all_callbacks['on_query_context']:
        try:
            val = callback.on_query_context(v, key, operator, operand, match_all)
            if val:
                return True
        except:
            traceback.print_exc()

    for vel in event_listeners_for_view(v):
        if 'on_query_context' in vel.__class__.__dict__:
            try:
                val = vel.on_query_context(key, operator, operand, match_all)
                if val:
                    return True
            except:
                traceback.print_exc()

    return False


def normalise_completion(c):
    # Pad 1- or 2-element completion tuples to the (trigger, hint, contents)
    # triple the core expects.
    if len(c) == 1:
        return (c[0], "", "")
    elif len(c) == 2:
        return (c[0], "", c[1])
    else:
        return c


def on_query_completions(view_id, prefix, locations):
    # Merge completions from every listener; tuple results also contribute
    # flags (OR-ed together).
    v = sublime.View(view_id)

    completions = []
    flags = 0
    for callback in all_callbacks['on_query_completions']:
        try:
            res = callback.on_query_completions(v, prefix, locations)

            if isinstance(res, tuple):
                completions += [normalise_completion(c) for c in res[0]]
                flags |= res[1]
            elif isinstance(res, list):
                completions += [normalise_completion(c) for c in res]
        except:
            traceback.print_exc()

    for vel in event_listeners_for_view(v):
        if 'on_query_completions' in vel.__class__.__dict__:
            try:
                res = vel.on_query_completions(prefix, locations)

                if isinstance(res, tuple):
                    completions += [normalise_completion(c) for c in res[0]]
                    flags |= res[1]
                elif isinstance(res, list):
                    completions += [normalise_completion(c) for c in res]
            except:
                traceback.print_exc()

    return (completions, flags)


def on_hover(view_id, point, hover_zone):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_hover']:
        run_callback('on_hover', callback,
                     lambda: callback.on_hover(v, point, hover_zone))

    for vel in event_listeners_for_view(v):
        if 'on_hover' in vel.__class__.__dict__:
            try:
                vel.on_hover(point, hover_zone)
            except:
                traceback.print_exc()


def on_text_command(view_id, name, args):
    # Listeners may rewrite the command; first non-falsy result wins.
    # ("", None) means "no rewrite".
    v = sublime.View(view_id)
    for callback in all_callbacks['on_text_command']:
        try:
            res = callback.on_text_command(v, name, args)
            if isinstance(res, tuple):
                return res
            elif res:
                return (res, None)
        except:
            traceback.print_exc()

    return ("", None)


def on_window_command(window_id, name, args):
    window = sublime.Window(window_id)
    for callback in all_callbacks['on_window_command']:
        try:
            res = callback.on_window_command(window, name, args)
            if isinstance(res, tuple):
                return res
            elif res:
                return (res, None)
        except:
            traceback.print_exc()

    return ("", None)


def on_post_text_command(view_id, name, args):
    v = sublime.View(view_id)
    for callback in all_callbacks['on_post_text_command']:
        try:
            callback.on_post_text_command(v, name, args)
        except:
            traceback.print_exc()


def on_post_window_command(window_id, name, args):
    window = sublime.Window(window_id)
    for callback in all_callbacks['on_post_window_command']:
        try:
            callback.on_post_window_command(window, name, args)
        except:
            traceback.print_exc()


class Command(object):
    # Base class for all commands; the *_ methods are the entry points the
    # core calls, wrapping the user-overridable hooks.

    def name(self):
        # Derive the command name from the class name: CamelCase -> snake_case,
        # dropping a trailing "_command" (e.g. FooBarCommand -> "foo_bar").
        clsname = self.__class__.__name__
        name = clsname[0].lower()
        last_upper = False
        for c in clsname[1:]:
            if c.isupper() and not last_upper:
                name += '_'
                name += c.lower()
            else:
                name += c
            last_upper = c.isupper()
        if name.endswith("_command"):
            name = name[0:-8]
        return name

    def is_enabled_(self, args):
        # TypeError fallback tolerates overrides that don't accept the args.
        ret = None
        try:
            args = self.filter_args(args)
            if args:
                ret = self.is_enabled(**args)
            else:
                ret = self.is_enabled()
        except TypeError:
            ret = self.is_enabled()

        if not isinstance(ret, bool):
            raise ValueError("is_enabled must return a bool", self)

        return ret

    def is_enabled(self):
        return True

    def is_visible_(self, args):
        ret = None
        try:
            args = self.filter_args(args)
            if args:
                ret = self.is_visible(**args)
            else:
                ret = self.is_visible()
        except TypeError:
            ret = self.is_visible()

        if not isinstance(ret, bool):
            raise ValueError("is_visible must return a bool", self)

        return ret

    def is_visible(self):
        return True

    def is_checked_(self, args):
        ret = None
        try:
            args = self.filter_args(args)
            if args:
                ret = self.is_checked(**args)
            else:
                ret = self.is_checked()
        except TypeError:
            ret = self.is_checked()

        if not isinstance(ret, bool):
            raise ValueError("is_checked must return a bool", self)

        return ret

    def is_checked(self):
        return False

    def description_(self, args):
        try:
            args = self.filter_args(args)
            if args is not None:
                return self.description(**args)
            else:
                return self.description()
        except TypeError:
            return ""

    def description(self):
        return ""

    def filter_args(self, args):
        # Strip the 'event' arg unless the command opted in via want_event();
        # copy first so the caller's dict isn't mutated.
        if args:
            if 'event' in args and not self.want_event():
                args = args.copy()
                del args['event']
        return args

    def want_event(self):
        return False


class ApplicationCommand(Command):
    def run_(self, edit_token, args):
        args = self.filter_args(args)
        if args:
            return self.run(**args)
        else:
            return self.run()

    def run(self):
        pass


class WindowCommand(Command):
    def __init__(self, window):
        self.window = window

    def run_(self, edit_token, args):
        args = self.filter_args(args)
        if args:
            return self.run(**args)
        else:
            return self.run()

    def run(self):
        pass


class TextCommand(Command):
    def __init__(self, view):
        self.view = view

    def run_(self, edit_token, args):
        # Wrap run() in a begin_edit/end_edit pair so buffer modifications are
        # grouped into one undo step; end_edit always runs via finally.
        args = self.filter_args(args)
        if args:
            edit = self.view.begin_edit(edit_token, self.name(), args)
            try:
                return self.run(edit, **args)
            finally:
                self.view.end_edit(edit)
        else:
            edit = self.view.begin_edit(edit_token, self.name())
            try:
                return self.run(edit)
            finally:
                self.view.end_edit(edit)

    def run(self, edit):
        pass


class EventListener(object):
    pass


class ViewEventListener(object):
    @classmethod
    def is_applicable(cls, settings):
        return True

    @classmethod
    def applies_to_primary_view_only(cls):
        return True

    def __init__(self, view):
        self.view = view


class MultizipImporter(object):
    # sys.meta_path finder that routes imports of .sublime-package modules
    # to the matching ZipLoader.
    def __init__(self):
        self.loaders = []
        self.file_loaders = []

    def find_module(self, fullname, path=None):
        # Top-level package: match by loader name. Submodule: match by the
        # parent package's __path__ ([zippath]).
        if not path:
            for l in self.loaders:
                if l.name == fullname:
                    return l

        for l in self.loaders:
            if path == [l.zippath]:
                if l.has(fullname):
                    return l

        return None


class ZipLoader(object):
    # Loads Python modules out of a .sublime-package zip, with loose .py
    # files under `override_path` taking precedence over zip contents.
    def __init__(self, zippath):
        self.zippath = zippath
        self.name = os.path.splitext(os.path.basename(zippath))[0]
        self._scan_zip()

    def has(self, fullname):
        # True if the dotted name exists in the zip or as a loose override.
        key = '.'.join(fullname.split('.')[1:])
        if key in self.contents:
            return True

        override_file = os.path.join(override_path, os.sep.join(fullname.split('.')) + '.py')
        if os.path.isfile(override_file):
            return True

        override_package = os.path.join(override_path, os.sep.join(fullname.split('.')))
        if os.path.isdir(override_package):
            return True

        return False

    def load_module(self, fullname):
        # Only if a module is being reloaded and hasn't been scanned recently
        # do we force a refresh of the contents of the .sublime-package. This
        # allows proper code upgrades using Package Control.
        if fullname in imp._RELOADING:
            if self.refreshed < time.time() - 5:
                self._scan_zip()

        source, source_path, mod_file, is_pkg = self._read_source(fullname)

        if source is None:
            raise ImportError("No module named '%s'" % fullname)

        is_new = False
        if fullname in sys.modules:
            mod = sys.modules[fullname]
            old_mod_file = mod.__file__
        else:
            is_new = True
            mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
            mod.__name__ = fullname
            mod.__path__ = [self.zippath]
            mod.__loader__ = self

        mod.__file__ = mod_file
        if is_pkg:
            mod.__package__ = mod.__name__
        else:
            mod.__package__ = fullname.rpartition('.')[0]

        try:
            exec(compile(source, source_path, 'exec'), mod.__dict__)
            return mod

        except:
            # Roll back sys.modules / __file__ on failure so a broken reload
            # doesn't leave a half-initialized module behind.
            if is_new:
                del sys.modules[fullname]
            else:
                mod.__file__ = old_mod_file

            raise

    def _read_source(self, fullname):
        # Returns (source, source_path, mod_file, is_pkg); source is None if
        # the name can't be resolved. Override files beat zip contents.
        name_parts = fullname.split('.')
        override_basename = os.path.join(override_path, *name_parts)
        override_py = override_basename + '.py'
        override_init = os.path.join(override_basename, '__init__.py')

        if os.path.isfile(override_py):
            try:
                with open(override_py, 'r', encoding='utf-8') as f:
                    return (f.read(), override_py, override_py, False)
            except (Exception) as e:
                print(override_py, 'could not be read:', e)

        if os.path.isfile(override_init):
            try:
                with open(override_init, 'r', encoding='utf-8') as f:
                    return (f.read(), override_init, override_init, True)
            except (Exception) as e:
                print(override_init, 'could not be read:', e)

        key = '.'.join(name_parts[1:])
        if key in self.contents:
            source = self.contents[key]
            source_path = key + " in " + self.zippath
            mod_file = os.path.join(self.zippath, self.filenames[key]).rstrip(os.sep)
            is_pkg = key in self.packages
            return (source, source_path, mod_file, is_pkg)

        # This allows .py overrides to exist in subfolders that:
        # 1. Do not exist in the .sublime-package file
        # 2. Do not contain an __init__.py
        if os.path.isdir(override_basename):
            return ('', override_basename, override_basename, True)

        return (None, None, None, False)

    def _scan_zip(self):
        # Index every .py in the zip: dotted key -> source text / file name,
        # plus the set of package keys. Parent dirs without __init__.py are
        # synthesized as empty namespace packages.
        self.contents = {"": ""}
        self.filenames = {"": ""}
        self.packages = {""}
        self.refreshed = time.time()

        try:
            with zipfile.ZipFile(self.zippath, 'r') as z:
                files = [i.filename for i in z.infolist()]

                for f in files:
                    base, ext = os.path.splitext(f)
                    if ext != ".py":
                        continue

                    paths = base.split('/')
                    if len(paths) > 0 and paths[len(paths) - 1] == "__init__":
                        paths.pop()
                        self.packages.add('.'.join(paths))

                    try:
                        pkg_path = '.'.join(paths)
                        self.contents[pkg_path] = z.read(f).decode('utf-8')
                        self.filenames[pkg_path] = f
                    except UnicodeDecodeError:
                        print(f, "in", self.zippath, "is not utf-8 encoded, unable to load plugin")
                        continue

                    while len(paths) > 1:
                        paths.pop()
                        parent = '.'.join(paths)
                        if parent not in self.contents:
                            self.contents[parent] = ""
                            self.filenames[parent] = parent
                            self.packages.add(parent)
        except (Exception) as e:
            print("Error loading %s:" % self.zippath, e)


override_path = None
multi_importer = MultizipImporter()
sys.meta_path.insert(0, multi_importer)


def update_compressed_packages(pkgs):
    # Replace the importer's loader list with one ZipLoader per package path;
    # unreadable zips are reported and skipped.
    multi_importer.loaders = []
    for p in pkgs:
        try:
            multi_importer.loaders.append(ZipLoader(p))
        except (FileNotFoundError, zipfile.BadZipFile) as e:
            print("error loading " + p + ": " + str(e))


def set_override_path(path):
    # Module-level override root used by ZipLoader for loose .py overrides.
    global override_path
    override_path = path
"""Helpers for script and condition tracing.""" from __future__ import annotations from collections import deque from collections.abc import Generator from contextlib import contextmanager from contextvars import ContextVar from functools import wraps from typing import Any, Callable, cast from homeassistant.helpers.typing import TemplateVarsType import homeassistant.util.dt as dt_util class TraceElement: """Container for trace data.""" def __init__(self, variables: TemplateVarsType, path: str) -> None: """Container for trace data.""" self._child_key: tuple[str, str] | None = None self._child_run_id: str | None = None self._error: Exception | None = None self.path: str = path self._result: dict[str, Any] | None = None self.reuse_by_child = False self._timestamp = dt_util.utcnow() if variables is None: variables = {} last_variables = variables_cv.get() or {} variables_cv.set(dict(variables)) changed_variables = { key: value for key, value in variables.items() if key not in last_variables or last_variables[key] != value } self._variables = changed_variables def __repr__(self) -> str: """Container for trace data.""" return str(self.as_dict()) def set_child_id(self, child_key: tuple[str, str], child_run_id: str) -> None: """Set trace id of a nested script run.""" self._child_key = child_key self._child_run_id = child_run_id def set_error(self, ex: Exception) -> None: """Set error.""" self._error = ex def set_result(self, **kwargs: Any) -> None: """Set result.""" self._result = {**kwargs} def update_result(self, **kwargs: Any) -> None: """Set result.""" old_result = self._result or {} self._result = {**old_result, **kwargs} def as_dict(self) -> dict[str, Any]: """Return dictionary version of this TraceElement.""" result: dict[str, Any] = {"path": self.path, "timestamp": self._timestamp} if self._child_key is not None: result["child_id"] = { "domain": self._child_key[0], "item_id": self._child_key[1], "run_id": str(self._child_run_id), } if self._variables: 
result["changed_variables"] = self._variables if self._error is not None: result["error"] = str(self._error) if self._result is not None: result["result"] = self._result return result # Context variables for tracing # Current trace trace_cv: ContextVar[dict[str, deque[TraceElement]] | None] = ContextVar( "trace_cv", default=None ) # Stack of TraceElements trace_stack_cv: ContextVar[list[TraceElement] | None] = ContextVar( "trace_stack_cv", default=None ) # Current location in config tree trace_path_stack_cv: ContextVar[list[str] | None] = ContextVar( "trace_path_stack_cv", default=None ) # Copy of last variables variables_cv: ContextVar[Any | None] = ContextVar("variables_cv", default=None) # (domain, item_id) + Run ID trace_id_cv: ContextVar[tuple[tuple[str, str], str] | None] = ContextVar( "trace_id_cv", default=None ) # Reason for stopped script execution script_execution_cv: ContextVar[StopReason | None] = ContextVar( "script_execution_cv", default=None ) def trace_id_set(trace_id: tuple[tuple[str, str], str]) -> None: """Set id of the current trace.""" trace_id_cv.set(trace_id) def trace_id_get() -> tuple[tuple[str, str], str] | None: """Get id if the current trace.""" return trace_id_cv.get() def trace_stack_push(trace_stack_var: ContextVar, node: Any) -> None: """Push an element to the top of a trace stack.""" trace_stack = trace_stack_var.get() if trace_stack is None: trace_stack = [] trace_stack_var.set(trace_stack) trace_stack.append(node) def trace_stack_pop(trace_stack_var: ContextVar) -> None: """Remove the top element from a trace stack.""" trace_stack = trace_stack_var.get() trace_stack.pop() def trace_stack_top(trace_stack_var: ContextVar) -> Any | None: """Return the element at the top of a trace stack.""" trace_stack = trace_stack_var.get() return trace_stack[-1] if trace_stack else None def trace_path_push(suffix: str | list[str]) -> int: """Go deeper in the config tree.""" if isinstance(suffix, str): suffix = [suffix] for node in suffix: 
trace_stack_push(trace_path_stack_cv, node) return len(suffix) def trace_path_pop(count: int) -> None: """Go n levels up in the config tree.""" for _ in range(count): trace_stack_pop(trace_path_stack_cv) def trace_path_get() -> str: """Return a string representing the current location in the config tree.""" path = trace_path_stack_cv.get() if not path: return "" return "/".join(path) def trace_append_element( trace_element: TraceElement, maxlen: int | None = None, ) -> None: """Append a TraceElement to trace[path].""" path = trace_element.path trace = trace_cv.get() if trace is None: trace = {} trace_cv.set(trace) if path not in trace: trace[path] = deque(maxlen=maxlen) trace[path].append(trace_element) def trace_get(clear: bool = True) -> dict[str, deque[TraceElement]] | None: """Return the current trace.""" if clear: trace_clear() return trace_cv.get() def trace_clear() -> None: """Clear the trace.""" trace_cv.set({}) trace_stack_cv.set(None) trace_path_stack_cv.set(None) variables_cv.set(None) script_execution_cv.set(StopReason()) def trace_set_child_id(child_key: tuple[str, str], child_run_id: str) -> None: """Set child trace_id of TraceElement at the top of the stack.""" node = cast(TraceElement, trace_stack_top(trace_stack_cv)) if node: node.set_child_id(child_key, child_run_id) def trace_set_result(**kwargs: Any) -> None: """Set the result of TraceElement at the top of the stack.""" node = cast(TraceElement, trace_stack_top(trace_stack_cv)) node.set_result(**kwargs) def trace_update_result(**kwargs: Any) -> None: """Update the result of TraceElement at the top of the stack.""" node = cast(TraceElement, trace_stack_top(trace_stack_cv)) node.update_result(**kwargs) class StopReason: """Mutable container class for script_execution.""" script_execution: str | None = None def script_execution_set(reason: str) -> None: """Set stop reason.""" data = script_execution_cv.get() if data is None: return data.script_execution = reason def script_execution_get() -> str | 
None: """Return the current trace.""" data = script_execution_cv.get() if data is None: return None return data.script_execution @contextmanager def trace_path(suffix: str | list[str]) -> Generator: """Go deeper in the config tree. Can not be used as a decorator on couroutine functions. """ count = trace_path_push(suffix) try: yield finally: trace_path_pop(count) def async_trace_path(suffix: str | list[str]) -> Callable: """Go deeper in the config tree. To be used as a decorator on coroutine functions. """ def _trace_path_decorator(func: Callable) -> Callable: """Decorate a coroutine function.""" @wraps(func) async def async_wrapper(*args: Any) -> None: """Catch and log exception.""" with trace_path(suffix): await func(*args) return async_wrapper return _trace_path_decorator
#Copyright Nick Prowse 2017. Code Licenced under Apache 2.
#Version 11. 21/10/2017.
#Programmed & tested in Python 2.76 only
#This program tests factorise.py - time for factorising first 1*10^20 numbers.
#Results are: Number input, average_time of factorisations, cumulative_time of factorisations, and total time for algorithm.
#Ensure that "prime_list_path" and "prime_list_filename" are edited for the location of prime list file used in factorise.py file.
#It has been tested on Linux Mint v3.19 x64 using a prime list with primes upto 9,999,997.
#For M = 10^4, cumulative_time is < 0.1 seconds, and total algorithm time is < 2 seconds.
#For M = 10^5, cumulative_time is < 1.9 seconds, and total algorithm time is < 60 seconds.

import sys
import math
import os.path
import time

import factorise


def main():
    """Benchmark factorise.factorise() over 2..M.

    Prompts the user for M, validates it, loads the prime list (primes up to
    floor(sqrt(M))) via factorise.csvfile_store_primes, runs factorise_test,
    and prints a breakdown of where the time went. Exits via sys.exit() on
    invalid input or a missing prime file.
    """
    prime_list_path = "/home/mint/Desktop/"
    prime_list_filename = "primes_upto_10000000.csv"
    primefile = prime_list_path + prime_list_filename

    print("Copyright Nick Prowse 2017. Code Licenced under Apache 2.")
    print("Version 11. 21/10/2017.")
    print("Programmed & tested in Python 2.76 only.")
    print("---------------------------------------------------------------------")
    print("This program tests factorise.py - time for factorising first M numbers supplied by user.")
    print("Results printed are stored in 2 lists - first for Number factorisations done upto, second for cumulatative time taken to factorise those numbers, third for average time.")
    print("Prime list file should be a .CSV file with each prime separated by commas.")
    print("Ensure that \"prime_list_path\" and \"prime_list_filename\" are edited for the location of prime list file used in factorise.py file.")
    print("It has been tested on Linux Mint v3.19 x64 using a prime list with primes upto 9,999,997.")
    print("---------------------------------------------------------------------")
    print('what number do you want to test factorisations upto?')
    M = raw_input()

    s1 = time.clock()
    print('Number input is: '+str(M))
    # Convert M to a long (Python 2): arbitrary-precision integer.
    M = long(M)

    # Simple sanity checks on M; 0, 1 and negatives are rejected.
    s_before_number_checks = time.clock()
    print("Running checks on "+str(M))
    if M == 0:
        print('Number entered is 0. Please choose another number.')
        sys.exit()
    if M == 1:
        print('1 is not a prime. Please choose another number.')
        sys.exit()
    if M < 0:
        print('Number entered is negative. Please enter another number')
        sys.exit()
    c_number_checks = time.clock() - s_before_number_checks

    # The prime CSV must exist before we attempt to load it.
    print('Checking if Prime file exists in location specified..')
    if os.path.exists(primefile) is False:
        print('Prime file doesn\'t exist in location specified. Exiting.')
        sys.exit()

    # Only primes <= sqrt(M) are needed for trial division.
    print('Calculating floor of square root of '+str(M)+'..')
    s_before_floor_sqrt_M = time.clock()
    floor_sqrt_M = int(math.floor(math.sqrt(M)))
    c_floor_sqrt_M = time.clock() - s_before_floor_sqrt_M

    print('Storing primes upto '+str(floor_sqrt_M)+' from csvfile..')
    s_before_primes = time.clock()
    primes = factorise.csvfile_store_primes(primefile, floor_sqrt_M)
    c_primes = int(10000 * (time.clock() - s_before_primes)) / 10000.0

    print('Running factorise_test ..')
    s_before_factorise_test = time.clock()
    result = factorise_test(M, primes)
    c_factorise_test = int(10000 * (time.clock() - s_before_factorise_test)) / 10000.0

    # Unpack the timing tuple; each value is truncated to 4 decimal places.
    print('Collating results..')
    cumulative_time = int(10000 * result[0]) / 10000.0
    average_time = result[1]  # no need to round
    check = result[2]
    cumul_check_time = int(10000 * result[3]) / 10000.0
    c_forloop = int(10000 * result[4]) / 10000.0
    cumul_add1toN = int(10000 * result[5]) / 10000.0
    cumul_R = int(10000 * result[6]) / 10000.0
    c_definerange = int(10000 * result[7]) / 10000.0
    cumul_pp_time = int(10000 * result[8]) / 10000.0
    cumul_nrem_time = int(10000 * result[9]) / 10000.0
    c_total_time = int(10000 * (time.clock() - s1)) / 10000.0

    print('-----------------------------')
    print('M is: '+str(M))
    print('Total time for main() algorithm to run: '+str(c_total_time)+' seconds')
    print('-----------------------------')
    print('Total time taken for prime list creation: '+str(c_primes)+' seconds')
    print('Total time taken for factorise_test(): '+str(c_factorise_test)+' seconds')
    print('-----------------------------')
    print('Total time taken for for loop in factorise_test(): '+str(c_forloop)+' seconds')
    print('Total time taken for R calculation in factorise_test(): '+str(cumul_R)+' seconds')
    print('Total time taken for adding 1 to N in factorise_test(): '+str(cumul_add1toN)+' seconds')
    print('-----------------------------')
    print('Cumulative factorisation time is: '+str(cumulative_time)+' seconds')
    print('Average factorisation time is: '+str(average_time)+' seconds')
    print('-----------------------------')
    print('Total time taken - calc primes powers in calc_primes_powers_remainder(): '+str(cumul_pp_time)+' seconds')
    print('Total time taken - N remainder in calc_primes_powers_remainder(): '+str(cumul_nrem_time)+' seconds')
    print('-----------------------------')
    print('Numbers that took >= 0.0003s to try to factorise are: '+str(check))


def factorise_test(M, primes):
    """Factorise every N in 2..M-1 and accumulate timing statistics.

    Returns a 10-tuple:
    (cumulative_time, average_time, check, cumul_check_time, c_forloop,
     cumul_R, cumul_add1toN, c_definerange, cumul_pp_time, cumul_nrem_time)
    where `check` is the list of N whose factorisation took >= 0.0003s.
    """
    check = []
    cumulative_time = 0
    cumul_check_time = 0
    cumul_R = 0
    cumul_add1toN = 0
    cumul_pp_time = 0
    cumul_nrem_time = 0

    print("Looping through values from 2 to "+str(M)+"..")
    s_definerange = time.clock()
    number_range = tuple(xrange(2, M))
    c_definerange = time.clock() - s_definerange

    s_forloop = time.clock()
    for N in number_range:
        # Time one factorisation; b[5]/b[6] are sub-timings reported by
        # factorise.factorise for its internal phases.
        s1 = time.clock()
        b = factorise.factorise(N, primes)
        c_pp = b[5]
        cumul_pp_time = cumul_pp_time + c_pp
        c_nrem = b[6]
        cumul_nrem_time = cumul_nrem_time + c_nrem
        c1 = time.clock() - s1
        cumulative_time = cumulative_time + c1

        # Record slow factorisations for later inspection.
        s_check = time.clock()
        if c1 >= 0.0003:
            check.append(N)
        c_check = time.clock() - s_check
        cumul_check_time = cumul_check_time + c_check

        # Progress ratio (kept for the disabled progress-bar code).
        s_R = time.clock()
        R = float(N) / M
        c_R = time.clock() - s_R
        cumul_R = cumul_R + c_R

        s_add1toN = time.clock()
        N = N + 1
        # BUG FIX: previously measured against s_check, which also included
        # the check and R sections above; measure against s_add1toN.
        c_add1toN = time.clock() - s_add1toN
        cumul_add1toN = cumul_add1toN + c_add1toN

    c_forloop = time.clock() - s_forloop
    average_time = cumulative_time / (M - 1)

    return (cumulative_time, average_time, check, cumul_check_time, c_forloop,
            cumul_R, cumul_add1toN, c_definerange, cumul_pp_time,
            cumul_nrem_time)


if __name__ == '__main__':
    main()
from pyops.utils import is_elapsed_time, parse_time, getMonth
import pandas as pd
from datetime import datetime
import os


class EVF:
    """Parser/writer for an EVF event file.

    On construction, reads `fname` and exposes:
    - self.header / self.meta: raw header lines and key: value metadata
    - self.events: pandas DataFrame of events, sorted by time
    - self.init_values, self.include_files, self.propagation_delay
    - self.WTF: comment lines encountered after the metadata section
    """

    def __init__(self, fname):
        # Variable initialization
        self.WTF = list()
        self.meta = dict()
        self.header = list()
        self.ref_date = None
        self.init_values = list()
        self.include_files = list()
        self.propagation_delay = None
        # Loading the given file
        self.load(fname)

    def load(self, fname):
        """Parse the file into header metadata and the events DataFrame."""
        # Storing the name of the file for editing purposes
        self.fname = fname
        # Auxiliary dictionary to speed up the data conversion into pandas
        aux_dict = dict(raw_time=[], time=[], event=[], experiment=[],
                        item=[], count=[], comment=[])
        # Importing the file
        out_ouf_metadata = False
        with open(fname) as f:
            for line in f:
                if '\n' in line[0]:
                    # Blank line: ignore.
                    pass
                # Filtering lines with comments
                elif '#' in line[0]:
                    if not out_ouf_metadata:
                        self.header.append(line)
                        self._read_metada(line)
                    else:
                        self.WTF.append(line)
                # Storing events
                elif is_elapsed_time(line.split()[0]):
                    aux_dict = self._read_events(line, aux_dict)
                # Useful data from the header
                else:
                    # We can say we are out of the metadata here because
                    # start_time and end_time are mandatory in the files
                    out_ouf_metadata = True
                    self._read_header_line(line.split())
        # Closing the file (redundant after the with-block, kept as-is)
        f.close()
        # Creating the pandas dataframe
        self.events = pd.DataFrame(aux_dict)
        # Sorting by the time
        # NOTE(review): DataFrame.sort() is the pre-0.20 pandas API (removed
        # in modern pandas in favour of sort_values) — confirm pandas version.
        self.events = self.events.sort(['time'])
        # Sorting the columns in the dataframe
        cols = ['raw_time', 'time', 'event', 'experiment', 'item',
                'count', 'comment']
        self.events = self.events[cols]

    def _read_metada(self, line):
        # Store "#key: value" header lines into self.meta.
        # NOTE(review): method name keeps its original spelling
        # ("_read_metada") — renaming would touch call sites.
        if ': ' in line:
            self.meta[line[1:line.index(': ')].strip()] = \
                line[line.index(': ') + 1:-1].strip()

    def _read_events(self, line, aux_dict):
        """Parse one event line into the aux_dict column lists."""
        # Storing comments
        if '#' in line:
            index = line.index('#')
            aux_dict['comment'].append(line[index:-1])
        else:
            aux_dict['comment'].append(None)

        # Consecutive whitespace are regarded as a single separator
        l = line.split()
        aux_dict['raw_time'].append(l[0])
        aux_dict['time'].append(self._to_datetime(l[0]))
        aux_dict['event'].append(l[1])

        # Re-split upper-cased to match keywords case-insensitively.
        l = [e.upper() for e in line.split()]
        if 'ITEM' in l:
            # In the file it should be: EXP = <experiment> ITEM = <item>
            aux_dict['experiment'].append(l[l.index('ITEM') - 1])
            # In the file it should be: ITEM = <item>
            aux_dict['item'].append(l[l.index('ITEM') + 2])
            # Removing last parenthesis if exist
            if aux_dict['item'][-1][-1] == ')':
                aux_dict['item'][-1] = aux_dict['item'][-1][:-1]
            if '#' in aux_dict['item'][-1]:
                aux_dict['item'][-1] = \
                    aux_dict['item'][-1][:aux_dict['item'][-1].index('#') - 1]
        else:
            # Storing empty values
            aux_dict['experiment'].append(None)
            aux_dict['item'].append(None)

        if 'COUNT' in l or '(COUNT' in l:
            if 'COUNT' in l:
                # In the file it should be: COUNT = <count>
                aux_dict['count'].append(l[l.index('COUNT') + 2])
            else:
                # In the file it should be: (COUNT = <count>)
                aux_dict['count'].append(l[l.index('(COUNT') + 2])
            # Removing useless characters at the end
            if aux_dict['count'][-1][-1] == ')':
                aux_dict['count'][-1] = aux_dict['count'][-1][:-1]
            if '#' in aux_dict['count'][-1]:
                aux_dict['count'][-1] = \
                    aux_dict[
                        'count'][-1][:aux_dict['count'][-1].index('#') - 1]
        else:
            aux_dict['count'].append(None)

        return aux_dict

    def _read_header_line(self, line):
        """Dispatch a split header line to the matching attribute(s)."""
        if 'Ref_date:' in line:
            # Storing them in "raw" format
            self.raw_ref_time = line[1]
            # Getting the reference date from the header and transforming it
            # into datetime format
            self.ref_date = self._ref_date_to_datetime(line[1])
        elif 'Start_time:' in line:
            # Storing them in "raw" format
            self.raw_start_time = line[1]
            # Storing them in datetime format
            self.start_time = self._to_datetime(line[1])
        elif 'End_time:' in line:
            # Storing them in "raw" format
            self.raw_end_time = line[1]
            # Storing them in datetime format
            self.end_time = self._to_datetime(line[1])
        elif 'Propagation_delay:' in line:
            self.propagation_delay = line[1:]
        elif 'Init_value:' in line:
            # Storing them in "raw" format
            self.init_values.append(line[1:])
        # Sometimes it appears as Include instead of Include_file
        elif 'Include_file:' in line or 'Include:' in line:
            self.include_files.append(line[1:])

    def _ref_date_to_datetime(self, ref_date):
        # Convert "dd-Mon-yyyy" (month name mapped via getMonth) to datetime.
        ref_date = ref_date.split('-')[0] + "-" +\
            str(getMonth(ref_date.split('-')[1])) + "-" + \
            ref_date.split('-')[2]
        return datetime.strptime(ref_date, "%d-%m-%Y")

    def _to_datetime(self, element):
        # Parse either an elapsed time (relative to ref_date, if any) or a
        # "date_time" value carrying its own date part.
        if self.ref_date is None and '-' not in element:
            return parse_time(element)
        else:
            if '-' in element:
                date = self._ref_date_to_datetime(element.split('_')[0])
                return parse_time("000_" + element.split('_')[1], date)
            return parse_time(element, self.ref_date)

    def to_file(self, fname):
        """Write the header and events back out in EVF format."""
        # Creating file if the file doesn't exist and truncating it if exists
        with open(fname, 'w') as f:
            # Copying the header
            [f.write(line) for line in self.header]
            # Copying the useful data in the header
            # Reg_date
            if self.ref_date is not None:
                f.write("Ref_date: " + self.raw_ref_time + "\n#\n")
            # Start and End time
            f.write("Start_time: " + self.raw_start_time + "\n")
            f.write("End_time: " + self.raw_end_time + "\n#\n")
            # Propagation delay
            if self.propagation_delay is not None:
                output = ""
                for element in self.propagation_delay:
                    output += " " + element
                f.write("Propagation_delay: " + output + "\n#\n")
            # Init values
            if len(self.init_values) > 0:
                for value in self.init_values:
                    output = ""
                    for element in value:
                        output += " " + element
                    f.write("Init_value: " + output + "\n")
                f.write("#\n")
            # Include files
            if len(self.include_files) > 0:
                for include in self.include_files:
                    output = ""
                    for element in include:
                        output += " " + element
                    f.write("Include_file: " + output + "\n")
                f.write("#\n")
            # Copying events
            f.write("# Events_in_list: " + str(len(self.events.index)) +
                    "\n#\n")
            f.write("# Time Event\n#\n")
            for index, row in self.events.iterrows():
                output = row['raw_time'] + " " + row['event']
                if row['experiment'] is not None:
                    output += " (EXP = " + row['experiment'] + " "
                    output += "ITEM = " + row['item'] + ")"
                if row['count'] is not None:
                    output += " (COUNT = " + row['count'] + ")"
                if row['comment'] is not None:
                    output += " # " + row['comment']
                output += "\n"
                f.write(output)
            f.write("#\n")
        # Redundant after the with-block, kept as-is.
        f.close()

    def check_consistency(self):
        """Raise NameError if any event falls outside [start_time, end_time]."""
        if self.events['time'].min() < self.start_time:
            print ("There is an time event before the official start_time")
            print (self.events['time'].min() + " is before than " +
                   self.start_time)
            raise NameError('Events before start_time')
        elif self.events['time'].max() > self.end_time:
            print ("There is an time event after the official end_time")
            print (self.events['time'].max() + " is after than " +
                   self.end_time)
            raise NameError('Events after end_time')
        elif self.check_if_included_files_exist_in_directory():
            print ("Everything seems to be ok, congratulations! :)")

    def check_if_included_files_exist_in_directory(self):
        """Return True if every include file exists beside this EVF file."""
        files_exist = True

        # Getting the path of the directory where we are working
        path = os.path.dirname(os.path.abspath(self.fname))

        for fname in self.include_files:
            # Removing possible problematic characters
            fname = fname[0].strip('"')
            if not os.path.isfile(os.path.join(path, fname)):
                files_exist = False
                output = "It seems as if " + fname + "is not in the same "
                output += "directory as " + os.path.basename(self.fname)
                print (output)
        # Perhaps raising an exception here in the future...
        return files_exist
from __future__ import unicode_literals

import copy
import datetime

import boto.redshift
from botocore.exceptions import ClientError
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds
from moto.ec2 import ec2_backends
from .exceptions import (
    ClusterNotFoundError,
    ClusterParameterGroupNotFoundError,
    ClusterSecurityGroupNotFoundError,
    ClusterSnapshotAlreadyExistsError,
    ClusterSnapshotNotFoundError,
    ClusterSubnetGroupNotFoundError,
    InvalidParameterValueError,
    InvalidSubnetError,
    ResourceNotFoundFaultError,
    SnapshotCopyAlreadyDisabledFaultError,
    SnapshotCopyAlreadyEnabledFaultError,
    SnapshotCopyDisabledFaultError,
    SnapshotCopyGrantAlreadyExistsFaultError,
    SnapshotCopyGrantNotFoundFaultError,
)


# Fake AWS account id used in generated ARNs.
ACCOUNT_ID = 123456789012


class TaggableResourceMixin(object):
    """Mixin providing tag storage and ARN construction for Redshift models."""

    # Subclasses set this to the ARN resource-type segment (e.g. 'cluster').
    resource_type = None

    def __init__(self, region_name, tags):
        self.region = region_name
        self.tags = tags or []

    @property
    def resource_id(self):
        # Subclasses override to return their identifier for the ARN.
        return None

    @property
    def arn(self):
        return "arn:aws:redshift:{region}:{account_id}:{resource_type}:{resource_id}".format(
            region=self.region,
            account_id=ACCOUNT_ID,
            resource_type=self.resource_type,
            resource_id=self.resource_id)

    def create_tags(self, tags):
        # New tags replace existing tags with the same key.
        new_keys = [tag_set['Key'] for tag_set in tags]
        self.tags = [tag_set for tag_set in self.tags
                     if tag_set['Key'] not in new_keys]
        self.tags.extend(tags)
        return self.tags

    def delete_tags(self, tag_keys):
        self.tags = [tag_set for tag_set in self.tags
                     if tag_set['Key'] not in tag_keys]
        return self.tags


class Cluster(TaggableResourceMixin, BaseModel):
    """In-memory model of a Redshift cluster."""

    resource_type = 'cluster'

    def __init__(self, redshift_backend, cluster_identifier, node_type,
                 master_username, master_user_password, db_name, cluster_type,
                 cluster_security_groups, vpc_security_group_ids,
                 cluster_subnet_group_name, availability_zone,
                 preferred_maintenance_window, cluster_parameter_group_name,
                 automated_snapshot_retention_period, port, cluster_version,
                 allow_version_upgrade, number_of_nodes, publicly_accessible,
                 encrypted, region_name, tags=None, iam_roles_arn=None,
                 restored_from_snapshot=False):
        super(Cluster, self).__init__(region_name, tags)
        self.redshift_backend = redshift_backend
        self.cluster_identifier = cluster_identifier
        self.create_time = iso_8601_datetime_with_milliseconds(
            datetime.datetime.now())
        self.status = 'available'
        self.node_type = node_type
        self.master_username = master_username
        self.master_user_password = master_user_password
        # AWS defaults below mirror the real service's documented defaults.
        self.db_name = db_name if db_name else "dev"
        self.vpc_security_group_ids = vpc_security_group_ids
        self.cluster_subnet_group_name = cluster_subnet_group_name
        self.publicly_accessible = publicly_accessible
        self.encrypted = encrypted
        self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True
        self.cluster_version = cluster_version if cluster_version else "1.0"
        self.port = int(port) if port else 5439
        self.automated_snapshot_retention_period = int(
            automated_snapshot_retention_period) if automated_snapshot_retention_period else 1
        self.preferred_maintenance_window = preferred_maintenance_window if preferred_maintenance_window else "Mon:03:00-Mon:03:30"

        if cluster_parameter_group_name:
            self.cluster_parameter_group_name = [cluster_parameter_group_name]
        else:
            self.cluster_parameter_group_name = ['default.redshift-1.0']

        if cluster_security_groups:
            self.cluster_security_groups = cluster_security_groups
        else:
            self.cluster_security_groups = ["Default"]

        if availability_zone:
            self.availability_zone = availability_zone
        else:
            # This could probably be smarter, but there doesn't appear to be a
            # way to pull AZs for a region in boto
            self.availability_zone = region_name + "a"

        if cluster_type == 'single-node':
            self.number_of_nodes = 1
        elif number_of_nodes:
            self.number_of_nodes = int(number_of_nodes)
        else:
            self.number_of_nodes = 1

        self.iam_roles_arn = iam_roles_arn or []
        self.restored_from_snapshot = restored_from_snapshot

    @classmethod
    def create_from_cloudformation_json(cls, resource_name,
                                        cloudformation_json, region_name):
        """Build a Cluster from a CloudFormation resource definition."""
        redshift_backend = redshift_backends[region_name]
        properties = cloudformation_json['Properties']

        if 'ClusterSubnetGroupName' in properties:
            subnet_group_name = properties[
                'ClusterSubnetGroupName'].cluster_subnet_group_name
        else:
            subnet_group_name = None

        cluster = redshift_backend.create_cluster(
            cluster_identifier=resource_name,
            node_type=properties.get('NodeType'),
            master_username=properties.get('MasterUsername'),
            master_user_password=properties.get('MasterUserPassword'),
            db_name=properties.get('DBName'),
            cluster_type=properties.get('ClusterType'),
            cluster_security_groups=properties.get(
                'ClusterSecurityGroups', []),
            vpc_security_group_ids=properties.get('VpcSecurityGroupIds', []),
            cluster_subnet_group_name=subnet_group_name,
            availability_zone=properties.get('AvailabilityZone'),
            preferred_maintenance_window=properties.get(
                'PreferredMaintenanceWindow'),
            cluster_parameter_group_name=properties.get(
                'ClusterParameterGroupName'),
            automated_snapshot_retention_period=properties.get(
                'AutomatedSnapshotRetentionPeriod'),
            port=properties.get('Port'),
            cluster_version=properties.get('ClusterVersion'),
            allow_version_upgrade=properties.get('AllowVersionUpgrade'),
            number_of_nodes=properties.get('NumberOfNodes'),
            publicly_accessible=properties.get("PubliclyAccessible"),
            encrypted=properties.get("Encrypted"),
            region_name=region_name,
        )
        return cluster

    def get_cfn_attribute(self, attribute_name):
        # CloudFormation Fn::GetAtt support for Endpoint.Address/Port only.
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Endpoint.Address':
            return self.endpoint
        elif attribute_name == 'Endpoint.Port':
            return self.port
        raise UnformattedGetAttTemplateException()

    @property
    def endpoint(self):
        # Fake but deterministic endpoint hostname.
        return "{0}.cg034hpkmmjt.{1}.redshift.amazonaws.com".format(
            self.cluster_identifier,
            self.region,
        )

    @property
    def security_groups(self):
        return [
            security_group for security_group
            in self.redshift_backend.describe_cluster_security_groups()
            if security_group.cluster_security_group_name in
            self.cluster_security_groups
        ]

    @property
    def vpc_security_groups(self):
        return [
            security_group for security_group
            in self.redshift_backend.ec2_backend.describe_security_groups()
            if security_group.id in self.vpc_security_group_ids
        ]

    @property
    def parameter_groups(self):
        return [
            parameter_group for parameter_group
            in self.redshift_backend.describe_cluster_parameter_groups()
            if parameter_group.cluster_parameter_group_name in
            self.cluster_parameter_group_name
        ]

    @property
    def resource_id(self):
        return self.cluster_identifier

    def to_json(self):
        """Return the DescribeClusters-style dict for this cluster."""
        json_response = {
            "MasterUsername": self.master_username,
            "MasterUserPassword": "****",
            "ClusterVersion": self.cluster_version,
            "VpcSecurityGroups": [{
                "Status": "active",
                "VpcSecurityGroupId": group.id
            } for group in self.vpc_security_groups],
            "ClusterSubnetGroupName": self.cluster_subnet_group_name,
            "AvailabilityZone": self.availability_zone,
            "ClusterStatus": self.status,
            "NumberOfNodes": self.number_of_nodes,
            "AutomatedSnapshotRetentionPeriod": self.automated_snapshot_retention_period,
            "PubliclyAccessible": self.publicly_accessible,
            "Encrypted": self.encrypted,
            "DBName": self.db_name,
            "PreferredMaintenanceWindow": self.preferred_maintenance_window,
            "ClusterParameterGroups": [{
                "ParameterApplyStatus": "in-sync",
                "ParameterGroupName": group.cluster_parameter_group_name,
            } for group in self.parameter_groups],
            "ClusterSecurityGroups": [{
                "Status": "active",
                "ClusterSecurityGroupName": group.cluster_security_group_name,
            } for group in self.security_groups],
            "Port": self.port,
            "NodeType": self.node_type,
            "ClusterIdentifier": self.cluster_identifier,
            "AllowVersionUpgrade": self.allow_version_upgrade,
            "Endpoint": {
                "Address": self.endpoint,
                "Port": self.port
            },
            'ClusterCreateTime': self.create_time,
            "PendingModifiedValues": [],
            "Tags": self.tags,
            "IamRoles": [{
                "ApplyStatus": "in-sync",
                "IamRoleArn": iam_role_arn
            } for iam_role_arn in self.iam_roles_arn]
        }
        if self.restored_from_snapshot:
            json_response['RestoreStatus'] = {
                'Status': 'completed',
                'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
                'SnapshotSizeInMegaBytes': 123,
                'ProgressInMegaBytes': 123,
                'ElapsedTimeInSeconds': 123,
                'EstimatedTimeToCompletionInSeconds': 123
            }
        try:
            # Only present when cross-region snapshot copy has been enabled.
            json_response['ClusterSnapshotCopyStatus'] = self.cluster_snapshot_copy_status
        except AttributeError:
            pass
        return json_response


class SnapshotCopyGrant(TaggableResourceMixin, BaseModel):
    """In-memory model of a Redshift snapshot copy grant."""

    resource_type = 'snapshotcopygrant'

    def __init__(self, snapshot_copy_grant_name, kms_key_id):
        # NOTE(review): unlike the other models here, this never calls
        # TaggableResourceMixin.__init__, so self.region/self.tags are not
        # set — confirm whether arn/tag operations are ever used on grants.
        self.snapshot_copy_grant_name = snapshot_copy_grant_name
        self.kms_key_id = kms_key_id

    def to_json(self):
        return {
            "SnapshotCopyGrantName": self.snapshot_copy_grant_name,
            "KmsKeyId": self.kms_key_id
        }


class SubnetGroup(TaggableResourceMixin, BaseModel):
    """In-memory model of a Redshift cluster subnet group."""

    resource_type = 'subnetgroup'

    def __init__(self, ec2_backend, cluster_subnet_group_name, description,
                 subnet_ids, region_name, tags=None):
        super(SubnetGroup, self).__init__(region_name, tags)
        self.ec2_backend = ec2_backend
        self.cluster_subnet_group_name = cluster_subnet_group_name
        self.description = description
        self.subnet_ids = subnet_ids
        # Validate the subnets resolve against the EC2 backend.
        if not self.subnets:
            raise InvalidSubnetError(subnet_ids)

    @classmethod
    def create_from_cloudformation_json(cls, resource_name,
                                        cloudformation_json, region_name):
        redshift_backend = redshift_backends[region_name]
        properties = cloudformation_json['Properties']

        subnet_group = redshift_backend.create_cluster_subnet_group(
            cluster_subnet_group_name=resource_name,
            description=properties.get("Description"),
            subnet_ids=properties.get("SubnetIds", []),
            region_name=region_name
        )
        return subnet_group

    @property
    def subnets(self):
        return self.ec2_backend.get_all_subnets(
            filters={'subnet-id': self.subnet_ids})

    @property
    def vpc_id(self):
        return self.subnets[0].vpc_id

    @property
    def resource_id(self):
        return self.cluster_subnet_group_name

    def to_json(self):
        return {
            "VpcId": self.vpc_id,
            "Description": self.description,
            "ClusterSubnetGroupName": self.cluster_subnet_group_name,
            "SubnetGroupStatus": "Complete",
            "Subnets": [{
                "SubnetStatus": "Active",
                "SubnetIdentifier": subnet.id,
                "SubnetAvailabilityZone": {
                    "Name": subnet.availability_zone
                },
            } for subnet in self.subnets],
            "Tags": self.tags
        }


class SecurityGroup(TaggableResourceMixin, BaseModel):
    """In-memory model of a Redshift cluster security group."""

    resource_type = 'securitygroup'

    def __init__(self, cluster_security_group_name, description, region_name,
                 tags=None):
        super(SecurityGroup, self).__init__(region_name, tags)
        self.cluster_security_group_name = cluster_security_group_name
        self.description = description

    @property
    def resource_id(self):
        return self.cluster_security_group_name

    def to_json(self):
        return {
            "EC2SecurityGroups": [],
            "IPRanges": [],
            "Description": self.description,
            "ClusterSecurityGroupName": self.cluster_security_group_name,
            "Tags": self.tags
        }


class ParameterGroup(TaggableResourceMixin, BaseModel):
    """In-memory model of a Redshift cluster parameter group."""

    resource_type = 'parametergroup'

    def __init__(self, cluster_parameter_group_name, group_family,
                 description, region_name, tags=None):
        super(ParameterGroup, self).__init__(region_name, tags)
        self.cluster_parameter_group_name = cluster_parameter_group_name
        self.group_family = group_family
        self.description = description

    @classmethod
    def create_from_cloudformation_json(cls, resource_name,
                                        cloudformation_json, region_name):
        redshift_backend = redshift_backends[region_name]
        properties = cloudformation_json['Properties']

        parameter_group = redshift_backend.create_cluster_parameter_group(
            cluster_parameter_group_name=resource_name,
            description=properties.get("Description"),
            group_family=properties.get("ParameterGroupFamily"),
            region_name=region_name
        )
        return parameter_group

    @property
    def resource_id(self):
        return self.cluster_parameter_group_name

    def to_json(self):
        return {
            "ParameterGroupFamily": self.group_family,
            "Description": self.description,
            "ParameterGroupName": self.cluster_parameter_group_name,
            "Tags": self.tags
        }


# NOTE(review): this definition continues beyond the visible chunk.
class Snapshot(TaggableResourceMixin,
               BaseModel):
    # A point-in-time copy of a cluster. Holds a shallow copy of the source
    # Cluster so later mutations of the live cluster don't alter the snapshot.
    resource_type = 'snapshot'

    def __init__(self, cluster, snapshot_identifier, region_name, tags=None,
                 iam_roles_arn=None):
        super(Snapshot, self).__init__(region_name, tags)
        self.cluster = copy.copy(cluster)
        self.snapshot_identifier = snapshot_identifier
        self.snapshot_type = 'manual'
        self.status = 'available'
        self.create_time = iso_8601_datetime_with_milliseconds(
            datetime.datetime.now())
        self.iam_roles_arn = iam_roles_arn or []

    @property
    def resource_id(self):
        # ARN resource segment: "<cluster id>/<snapshot id>".
        return "{cluster_id}/{snapshot_id}".format(
            cluster_id=self.cluster.cluster_identifier,
            snapshot_id=self.snapshot_identifier)

    def to_json(self):
        # Serialize to the wire shape of a DescribeClusterSnapshots entry.
        return {
            'SnapshotIdentifier': self.snapshot_identifier,
            'ClusterIdentifier': self.cluster.cluster_identifier,
            'SnapshotCreateTime': self.create_time,
            'Status': self.status,
            'Port': self.cluster.port,
            'AvailabilityZone': self.cluster.availability_zone,
            'MasterUsername': self.cluster.master_username,
            'ClusterVersion': self.cluster.cluster_version,
            'SnapshotType': self.snapshot_type,
            'NodeType': self.cluster.node_type,
            'NumberOfNodes': self.cluster.number_of_nodes,
            'DBName': self.cluster.db_name,
            'Tags': self.tags,
            "IamRoles": [{
                "ApplyStatus": "in-sync",
                "IamRoleArn": iam_role_arn
            } for iam_role_arn in self.iam_roles_arn]
        }


class RedshiftBackend(BaseBackend):
    """In-memory mock of the AWS Redshift service for one region."""

    def __init__(self, ec2_backend, region_name):
        self.region = region_name
        self.clusters = {}
        self.subnet_groups = {}
        # Redshift pre-creates a default security group.
        self.security_groups = {
            "Default": SecurityGroup("Default", "Default Redshift Security Group",
                                     self.region)
        }
        self.parameter_groups = {
            "default.redshift-1.0": ParameterGroup(
                "default.redshift-1.0",
                "redshift-1.0",
                "Default Redshift parameter group",
                self.region
            )
        }
        self.ec2_backend = ec2_backend
        self.snapshots = OrderedDict()
        # Maps the resource-type segment of an ARN to its backing store;
        # used by the tagging helpers.
        self.RESOURCE_TYPE_MAP = {
            'cluster': self.clusters,
            'parametergroup': self.parameter_groups,
            'securitygroup': self.security_groups,
            'snapshot': self.snapshots,
            'subnetgroup': self.subnet_groups
        }
        self.snapshot_copy_grants = {}

    def reset(self):
        # Preserve constructor arguments, wipe all state, then re-init.
        ec2_backend = self.ec2_backend
        region_name = self.region
        self.__dict__ = {}
        self.__init__(ec2_backend, region_name)

    def enable_snapshot_copy(self, **kwargs):
        cluster_identifier = kwargs['cluster_identifier']
        cluster = self.clusters[cluster_identifier]
        # Presence of the attribute is the "copy enabled" flag.
        if not hasattr(cluster, 'cluster_snapshot_copy_status'):
            # KMS-encrypted clusters must name a snapshot copy grant.
            if cluster.encrypted == 'true' and kwargs['snapshot_copy_grant_name'] is None:
                raise ClientError(
                    'InvalidParameterValue',
                    'SnapshotCopyGrantName is required for Snapshot Copy '
                    'on KMS encrypted clusters.'
                )
            status = {
                'DestinationRegion': kwargs['destination_region'],
                'RetentionPeriod': kwargs['retention_period'],
                'SnapshotCopyGrantName': kwargs['snapshot_copy_grant_name'],
            }
            cluster.cluster_snapshot_copy_status = status
            return cluster
        else:
            raise SnapshotCopyAlreadyEnabledFaultError(cluster_identifier)

    def disable_snapshot_copy(self, **kwargs):
        cluster_identifier = kwargs['cluster_identifier']
        cluster = self.clusters[cluster_identifier]
        if hasattr(cluster, 'cluster_snapshot_copy_status'):
            del cluster.cluster_snapshot_copy_status
            return cluster
        else:
            raise SnapshotCopyAlreadyDisabledFaultError(cluster_identifier)

    def modify_snapshot_copy_retention_period(self, cluster_identifier,
                                              retention_period):
        cluster = self.clusters[cluster_identifier]
        if hasattr(cluster, 'cluster_snapshot_copy_status'):
            cluster.cluster_snapshot_copy_status['RetentionPeriod'] = retention_period
            return cluster
        else:
            raise SnapshotCopyDisabledFaultError(cluster_identifier)

    def create_cluster(self, **cluster_kwargs):
        cluster_identifier = cluster_kwargs['cluster_identifier']
        cluster = Cluster(self, **cluster_kwargs)
        self.clusters[cluster_identifier] = cluster
        return cluster

    def describe_clusters(self, cluster_identifier=None):
        # With an identifier: single-element list or ClusterNotFoundError.
        # Without: every cluster in the region.
        clusters = self.clusters.values()
        if cluster_identifier:
            if cluster_identifier in self.clusters:
                return [self.clusters[cluster_identifier]]
            else:
                raise ClusterNotFoundError(cluster_identifier)
        return clusters

    def modify_cluster(self, **cluster_kwargs):
        cluster_identifier = cluster_kwargs.pop('cluster_identifier')
        new_cluster_identifier = cluster_kwargs.pop(
            'new_cluster_identifier', None)

        cluster = self.describe_clusters(cluster_identifier)[0]

        # Apply every remaining kwarg directly as an attribute.
        for key, value in cluster_kwargs.items():
            setattr(cluster, key, value)

        if new_cluster_identifier:
            # Renaming re-keys the cluster in the store.
            self.delete_cluster(cluster_identifier)
            cluster.cluster_identifier = new_cluster_identifier
            self.clusters[new_cluster_identifier] = cluster

        return cluster

    def delete_cluster(self, cluster_identifier):
        if cluster_identifier in self.clusters:
            return self.clusters.pop(cluster_identifier)
        raise ClusterNotFoundError(cluster_identifier)

    def create_cluster_subnet_group(self, cluster_subnet_group_name,
                                    description, subnet_ids, region_name,
                                    tags=None):
        subnet_group = SubnetGroup(
            self.ec2_backend, cluster_subnet_group_name, description,
            subnet_ids, region_name, tags)
        self.subnet_groups[cluster_subnet_group_name] = subnet_group
        return subnet_group

    def describe_cluster_subnet_groups(self, subnet_identifier=None):
        subnet_groups = self.subnet_groups.values()
        if subnet_identifier:
            if subnet_identifier in self.subnet_groups:
                return [self.subnet_groups[subnet_identifier]]
            else:
                raise ClusterSubnetGroupNotFoundError(subnet_identifier)
        return subnet_groups

    def delete_cluster_subnet_group(self, subnet_identifier):
        if subnet_identifier in self.subnet_groups:
            return self.subnet_groups.pop(subnet_identifier)
        raise ClusterSubnetGroupNotFoundError(subnet_identifier)

    def create_cluster_security_group(self, cluster_security_group_name,
                                      description, region_name, tags=None):
        security_group = SecurityGroup(
            cluster_security_group_name, description, region_name, tags)
        self.security_groups[cluster_security_group_name] = security_group
        return security_group

    def describe_cluster_security_groups(self, security_group_name=None):
        security_groups = self.security_groups.values()
        if security_group_name:
            if security_group_name in self.security_groups:
                return [self.security_groups[security_group_name]]
            else:
                raise
ClusterSecurityGroupNotFoundError(security_group_name) return security_groups def delete_cluster_security_group(self, security_group_identifier): if security_group_identifier in self.security_groups: return self.security_groups.pop(security_group_identifier) raise ClusterSecurityGroupNotFoundError(security_group_identifier) def create_cluster_parameter_group(self, cluster_parameter_group_name, group_family, description, region_name, tags=None): parameter_group = ParameterGroup( cluster_parameter_group_name, group_family, description, region_name, tags) self.parameter_groups[cluster_parameter_group_name] = parameter_group return parameter_group def describe_cluster_parameter_groups(self, parameter_group_name=None): parameter_groups = self.parameter_groups.values() if parameter_group_name: if parameter_group_name in self.parameter_groups: return [self.parameter_groups[parameter_group_name]] else: raise ClusterParameterGroupNotFoundError(parameter_group_name) return parameter_groups def delete_cluster_parameter_group(self, parameter_group_name): if parameter_group_name in self.parameter_groups: return self.parameter_groups.pop(parameter_group_name) raise ClusterParameterGroupNotFoundError(parameter_group_name) def create_cluster_snapshot(self, cluster_identifier, snapshot_identifier, region_name, tags): cluster = self.clusters.get(cluster_identifier) if not cluster: raise ClusterNotFoundError(cluster_identifier) if self.snapshots.get(snapshot_identifier) is not None: raise ClusterSnapshotAlreadyExistsError(snapshot_identifier) snapshot = Snapshot(cluster, snapshot_identifier, region_name, tags) self.snapshots[snapshot_identifier] = snapshot return snapshot def describe_cluster_snapshots(self, cluster_identifier=None, snapshot_identifier=None): if cluster_identifier: for snapshot in self.snapshots.values(): if snapshot.cluster.cluster_identifier == cluster_identifier: return [snapshot] raise ClusterNotFoundError(cluster_identifier) if snapshot_identifier: if 
snapshot_identifier in self.snapshots: return [self.snapshots[snapshot_identifier]] raise ClusterSnapshotNotFoundError(snapshot_identifier) return self.snapshots.values() def delete_cluster_snapshot(self, snapshot_identifier): if snapshot_identifier not in self.snapshots: raise ClusterSnapshotNotFoundError(snapshot_identifier) deleted_snapshot = self.snapshots.pop(snapshot_identifier) deleted_snapshot.status = 'deleted' return deleted_snapshot def restore_from_cluster_snapshot(self, **kwargs): snapshot_identifier = kwargs.pop('snapshot_identifier') snapshot = self.describe_cluster_snapshots(snapshot_identifier=snapshot_identifier)[0] create_kwargs = { "node_type": snapshot.cluster.node_type, "master_username": snapshot.cluster.master_username, "master_user_password": snapshot.cluster.master_user_password, "db_name": snapshot.cluster.db_name, "cluster_type": 'multi-node' if snapshot.cluster.number_of_nodes > 1 else 'single-node', "availability_zone": snapshot.cluster.availability_zone, "port": snapshot.cluster.port, "cluster_version": snapshot.cluster.cluster_version, "number_of_nodes": snapshot.cluster.number_of_nodes, "encrypted": snapshot.cluster.encrypted, "tags": snapshot.cluster.tags, "restored_from_snapshot": True } create_kwargs.update(kwargs) return self.create_cluster(**create_kwargs) def create_snapshot_copy_grant(self, **kwargs): snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] kms_key_id = kwargs['kms_key_id'] if snapshot_copy_grant_name not in self.snapshot_copy_grants: snapshot_copy_grant = SnapshotCopyGrant(snapshot_copy_grant_name, kms_key_id) self.snapshot_copy_grants[snapshot_copy_grant_name] = snapshot_copy_grant return snapshot_copy_grant raise SnapshotCopyGrantAlreadyExistsFaultError(snapshot_copy_grant_name) def delete_snapshot_copy_grant(self, **kwargs): snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] if snapshot_copy_grant_name in self.snapshot_copy_grants: return 
self.snapshot_copy_grants.pop(snapshot_copy_grant_name) raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) def describe_snapshot_copy_grants(self, **kwargs): copy_grants = self.snapshot_copy_grants.values() snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] if snapshot_copy_grant_name: if snapshot_copy_grant_name in self.snapshot_copy_grants: return [self.snapshot_copy_grants[snapshot_copy_grant_name]] else: raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) return copy_grants def _get_resource_from_arn(self, arn): try: arn_breakdown = arn.split(':') resource_type = arn_breakdown[5] if resource_type == 'snapshot': resource_id = arn_breakdown[6].split('/')[1] else: resource_id = arn_breakdown[6] except IndexError: resource_type = resource_id = arn resources = self.RESOURCE_TYPE_MAP.get(resource_type) if resources is None: message = ( "Tagging is not supported for this type of resource: '{0}' " "(the ARN is potentially malformed, please check the ARN " "documentation for more information)".format(resource_type)) raise ResourceNotFoundFaultError(message=message) try: resource = resources[resource_id] except KeyError: raise ResourceNotFoundFaultError(resource_type, resource_id) else: return resource @staticmethod def _describe_tags_for_resources(resources): tagged_resources = [] for resource in resources: for tag in resource.tags: data = { 'ResourceName': resource.arn, 'ResourceType': resource.resource_type, 'Tag': { 'Key': tag['Key'], 'Value': tag['Value'] } } tagged_resources.append(data) return tagged_resources def _describe_tags_for_resource_type(self, resource_type): resources = self.RESOURCE_TYPE_MAP.get(resource_type) if not resources: raise ResourceNotFoundFaultError(resource_type=resource_type) return self._describe_tags_for_resources(resources.values()) def _describe_tags_for_resource_name(self, resource_name): resource = self._get_resource_from_arn(resource_name) return 
self._describe_tags_for_resources([resource]) def create_tags(self, resource_name, tags): resource = self._get_resource_from_arn(resource_name) resource.create_tags(tags) def describe_tags(self, resource_name, resource_type): if resource_name and resource_type: raise InvalidParameterValueError( "You cannot filter a list of resources using an Amazon " "Resource Name (ARN) and a resource type together in the " "same request. Retry the request using either an ARN or " "a resource type, but not both.") if resource_type: return self._describe_tags_for_resource_type(resource_type.lower()) if resource_name: return self._describe_tags_for_resource_name(resource_name) # If name and type are not specified, return all tagged resources. # TODO: Implement aws marker pagination tagged_resources = [] for resource_type in self.RESOURCE_TYPE_MAP: try: tagged_resources += self._describe_tags_for_resource_type(resource_type) except ResourceNotFoundFaultError: pass return tagged_resources def delete_tags(self, resource_name, tag_keys): resource = self._get_resource_from_arn(resource_name) resource.delete_tags(tag_keys) redshift_backends = {} for region in boto.redshift.regions(): redshift_backends[region.name] = RedshiftBackend(ec2_backends[region.name], region.name)
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""The security groups extension."""

import json

import webob
from webob import exc
from xml.dom import minidom

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import api as compute_api
from nova import exception
from nova.network.security_group import openstack_driver
from nova.network.security_group import quantum_driver
from nova.openstack.common import log as logging
from nova.virt import netutils

LOG = logging.getLogger(__name__)
# Hard authorizer (raises) vs. soft (returns bool) for optional decoration.
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')


def make_rule(elem):
    # Attach the XML sub-template for a single security group rule to `elem`.
    elem.set('id')
    elem.set('parent_group_id')

    proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
    proto.text = 'ip_protocol'

    from_port = xmlutil.SubTemplateElement(elem, 'from_port')
    from_port.text = 'from_port'

    to_port = xmlutil.SubTemplateElement(elem, 'to_port')
    to_port.text = 'to_port'

    group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
    name = xmlutil.SubTemplateElement(group, 'name')
    name.text = 'name'
    tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
    tenant_id.text = 'tenant_id'

    ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
                                          selector='ip_range')
    cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
    cidr.text = 'cidr'


def make_sg(elem):
    # Attach the XML sub-template for a security group (with nested rules).
    elem.set('id')
    elem.set('tenant_id')
    elem.set('name')

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    rules = xmlutil.SubTemplateElement(elem, 'rules')
    rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
    make_rule(rule)


sg_nsmap = {None: wsgi.XMLNS_V11}


class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('security_group_rule',
                                       selector='security_group_rule')
        make_rule(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('security_group',
                                       selector='security_group')
        make_sg(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('security_groups')
        elem = xmlutil.SubTemplateElement(root, 'security_group',
                                          selector='security_groups')
        make_sg(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """
    def default(self, string):
        """Deserialize an xml-formatted security group create request."""
        dom = xmlutil.safe_minidom_parse_string(string)
        security_group = {}
        sg_node = self.find_first_child_named(dom,
                                              'security_group')
        if sg_node is not None:
            if sg_node.hasAttribute('name'):
                security_group['name'] = sg_node.getAttribute('name')
            desc_node = self.find_first_child_named(sg_node,
                                                    "description")
            if desc_node:
                security_group['description'] = self.extract_text(desc_node)
        return {'body': {'security_group': security_group}}


class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """

    def default(self, string):
        """Deserialize an xml-formatted security group create request."""
        dom = xmlutil.safe_minidom_parse_string(string)
        security_group_rule = self._extract_security_group_rule(dom)
        return {'body': {'security_group_rule': security_group_rule}}

    def _extract_security_group_rule(self, node):
        """Marshal the security group rule attribute of a parsed request."""
        sg_rule = {}
        sg_rule_node = self.find_first_child_named(node,
                                                   'security_group_rule')
        if sg_rule_node is not None:
            ip_protocol_node = self.find_first_child_named(sg_rule_node,
                                                           "ip_protocol")
            if ip_protocol_node is not None:
                sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)

            from_port_node = self.find_first_child_named(sg_rule_node,
                                                         "from_port")
            if from_port_node is not None:
                sg_rule['from_port'] = self.extract_text(from_port_node)

            to_port_node = self.find_first_child_named(sg_rule_node,
                                                       "to_port")
            if to_port_node is not None:
                sg_rule['to_port'] = self.extract_text(to_port_node)

            parent_group_id_node = self.find_first_child_named(
                sg_rule_node, "parent_group_id")
            if parent_group_id_node is not None:
                sg_rule['parent_group_id'] = self.extract_text(
                    parent_group_id_node)

            group_id_node = self.find_first_child_named(sg_rule_node,
                                                        "group_id")
            if group_id_node is not None:
                sg_rule['group_id'] = self.extract_text(group_id_node)

            cidr_node = self.find_first_child_named(sg_rule_node,
                                                    "cidr")
            if cidr_node is not None:
                sg_rule['cidr'] = self.extract_text(cidr_node)

        return sg_rule


class SecurityGroupControllerBase(object):
    """Base class for Security Group controllers."""

    def __init__(self):
        # Driver selection (nova-network vs. quantum) happens here.
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)

    def _format_security_group_rule(self, context, rule):
        # Map an internal rule dict to the API response shape.
        sg_rule = {}
        sg_rule['id'] = rule['id']
        sg_rule['parent_group_id'] = rule['parent_group_id']
        sg_rule['ip_protocol'] = rule['protocol']
        sg_rule['from_port'] = rule['from_port']
        sg_rule['to_port'] =
rule['to_port']
        sg_rule['group'] = {}
        sg_rule['ip_range'] = {}
        if rule['group_id']:
            # Rule grants access to another security group.
            source_group = self.security_group_api.get(context,
                                                       id=rule['group_id'])
            sg_rule['group'] = {'name': source_group.get('name'),
                                'tenant_id': source_group.get('project_id')}
        else:
            # Rule grants access to a CIDR range.
            sg_rule['ip_range'] = {'cidr': rule['cidr']}
        return sg_rule

    def _format_security_group(self, context, group):
        # Map an internal group dict (with nested rules) to API shape.
        security_group = {}
        security_group['id'] = group['id']
        security_group['description'] = group['description']
        security_group['name'] = group['name']
        security_group['tenant_id'] = group['project_id']
        security_group['rules'] = []
        for rule in group['rules']:
            security_group['rules'] += [self._format_security_group_rule(
                context, rule)]
        return security_group

    def _authorize_context(self, req):
        context = req.environ['nova.context']
        authorize(context)
        return context

    def _from_body(self, body, key):
        # Extract a required top-level key from the request body or 422.
        if not body:
            raise exc.HTTPUnprocessableEntity()
        value = body.get(key, None)
        if value is None:
            raise exc.HTTPUnprocessableEntity()
        return value


class SecurityGroupController(SecurityGroupControllerBase):
    """The Security group API controller for the OpenStack API."""

    @wsgi.serializers(xml=SecurityGroupTemplate)
    def show(self, req, id):
        """Return data about the given security group."""
        context = self._authorize_context(req)

        id = self.security_group_api.validate_id(id)

        security_group = self.security_group_api.get(context, None, id,
                                                     map_exception=True)

        return {'security_group': self._format_security_group(context,
                                                              security_group)}

    def delete(self, req, id):
        """Delete a security group."""
        context = self._authorize_context(req)

        id = self.security_group_api.validate_id(id)

        security_group = self.security_group_api.get(context, None, id,
                                                     map_exception=True)

        self.security_group_api.destroy(context, security_group)

        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req):
        """Returns a list of security groups."""
        context = self._authorize_context(req)

        search_opts = {}
        search_opts.update(req.GET)

        raw_groups = self.security_group_api.list(context,
                                                  project=context.project_id,
                                                  search_opts=search_opts)

        limited_list = common.limited(raw_groups, req)
        result = [self._format_security_group(context, group)
                  for group in limited_list]

        # Stable ordering: by tenant then by group name.
        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}

    @wsgi.serializers(xml=SecurityGroupTemplate)
    @wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
    def create(self, req, body):
        """Creates a new security group."""
        context = self._authorize_context(req)

        security_group = self._from_body(body, 'security_group')

        group_name = security_group.get('name', None)
        group_description = security_group.get('description', None)

        self.security_group_api.validate_property(group_name, 'name', None)
        self.security_group_api.validate_property(group_description,
                                                  'description', None)

        group_ref = self.security_group_api.create_security_group(
            context, group_name, group_description)

        return {'security_group': self._format_security_group(context,
                                                              group_ref)}


class SecurityGroupRulesController(SecurityGroupControllerBase):

    @wsgi.serializers(xml=SecurityGroupRuleTemplate)
    @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
    def create(self, req, body):
        context = self._authorize_context(req)

        sg_rule = self._from_body(body, 'security_group_rule')

        parent_group_id = self.security_group_api.validate_id(
            sg_rule.get('parent_group_id', None))

        security_group = self.security_group_api.get(context, None,
                                                     parent_group_id,
                                                     map_exception=True)
        try:
            new_rule = self._rule_args_to_dict(context,
                                               to_port=sg_rule.get('to_port'),
                                               from_port=sg_rule.get('from_port'),
                                               ip_protocol=sg_rule.get('ip_protocol'),
                                               cidr=sg_rule.get('cidr'),
                                               group_id=sg_rule.get('group_id'))
        except Exception as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        if new_rule is None:
            msg = _("Not enough parameters to build a valid rule.")
            raise exc.HTTPBadRequest(explanation=msg)

        new_rule['parent_group_id'] = security_group['id']

        if 'cidr' in new_rule:
            # Reject a non-zero network with a /0 prefix.
            net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
            if net != '0.0.0.0' and prefixlen == '0':
                msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
                raise exc.HTTPBadRequest(explanation=msg)

        security_group_rule = (
            self.security_group_api.create_security_group_rule(
                context, security_group, new_rule))

        return {"security_group_rule": self._format_security_group_rule(
            context, security_group_rule)}

    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           ip_protocol=None, cidr=None, group_id=None):
        # Group-based and CIDR-based rules are mutually exclusive.
        if group_id is not None:
            group_id = self.security_group_api.validate_id(group_id)

            # check if groupId exists
            self.security_group_api.get(context, id=group_id)
            return self.security_group_api.new_group_ingress_rule(
                group_id, ip_protocol, from_port, to_port)
        else:
            cidr = self.security_group_api.parse_cidr(cidr)
            return self.security_group_api.new_cidr_ingress_rule(
                cidr, ip_protocol, from_port, to_port)

    def delete(self, req, id):
        context = self._authorize_context(req)

        id = self.security_group_api.validate_id(id)

        rule = self.security_group_api.get_rule(context, id)

        group_id = rule['parent_group_id']

        security_group = self.security_group_api.get(context, None, group_id,
                                                     map_exception=True)

        self.security_group_api.remove_rules(context, security_group,
                                             [rule['id']])

        return webob.Response(status_int=202)


class ServerSecurityGroupController(SecurityGroupControllerBase):

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = self._authorize_context(req)

        self.security_group_api.ensure_default(context)

        try:
            instance = self.compute_api.get(context, server_id)
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())

        groups = self.security_group_api.get_instance_security_groups(
            req, instance['id'], instance['uuid'], True)

        result = [self._format_security_group(context, group)
                  for group in groups]

        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}


class SecurityGroupActionController(wsgi.Controller):
    """Handles addSecurityGroup/removeSecurityGroup server actions."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, self).__init__(*args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)

    def _parse(self, body, action):
        # Pull and validate the group name from the action body.
        try:
            body = body[action]
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return group_name

    def _invoke(self, method, context, id, group_name):
        # Shared exception-to-HTTP mapping for add/remove actions.
        try:
            instance = self.compute_api.get(context, id)
            method(context, instance, group_name)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())

        return webob.Response(status_int=202)

    @wsgi.action('addSecurityGroup')
    def _addSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)

        group_name = self._parse(body, 'addSecurityGroup')

        return self._invoke(self.security_group_api.add_to_instance,
                            context, id, group_name)

    @wsgi.action('removeSecurityGroup')
    def _removeSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)

        group_name = self._parse(body, 'removeSecurityGroup')

        return self._invoke(self.security_group_api.remove_from_instance,
                            context, id, group_name)


class SecurityGroupsOutputController(wsgi.Controller):
    def __init__(self, *args, **kwargs):
        super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())

    def _extend_servers(self, req, servers):
        # Inject each server's security group list into the response dicts.
        key = "security_groups"
        if not openstack_driver.is_quantum_security_groups():
            for server in servers:
                instance = req.get_db_instance(server['id'])
                groups = instance.get(key)
                if groups:
                    server[key] = [{"name": group["name"]}
                                   for group in groups]
        else:
            # If method is a POST we get the security groups intended for an
            # instance from the request. The reason for this is if using
            # quantum security groups the requested security groups for the
            # instance are not in the db and have not been sent to quantum yet.
            if req.method != 'POST':
                for server in servers:
                    groups = (
                        self.security_group_api.get_instance_security_groups(
                            req, server['id']))
                    if groups:
                        server[key] = groups
            # In this section of code len(servers) == 1 as you can only POST
            # one server in an API request.
            else:
                try:
                    # try converting to json
                    req_obj = json.loads(req.body)
                    # Add security group to server, if no security group was in
                    # request add default since that is the group it is part of
                    servers[0][key] = req_obj['server'].get(
                        key, [{'name': 'default'}])
                except ValueError:
                    # Request body was XML, not JSON; parse it instead.
                    root = minidom.parseString(req.body)
                    sg_root = root.getElementsByTagName(key)
                    groups = []
                    if sg_root:
                        security_groups = sg_root[0].getElementsByTagName(
                            'security_group')
                        for security_group in security_groups:
                            groups.append(
                                {'name': security_group.getAttribute('name')})
                    if not groups:
                        groups = [{'name': 'default'}]

                    servers[0][key] = groups

    def _show(self, req, resp_obj):
        # Soft authorization: silently skip extension if not permitted.
        if not softauth(req.environ['nova.context']):
            return
        if 'server' in resp_obj.obj:
            resp_obj.attach(xml=SecurityGroupServerTemplate())
            self._extend_servers(req, [resp_obj.obj['server']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        if not softauth(req.environ['nova.context']):
            return
        resp_obj.attach(xml=SecurityGroupServersTemplate())
        self._extend_servers(req, list(resp_obj.obj['servers']))


class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
    def will_render(self, datum):
        # Only emit the element when the server dict carries groups.
        return "security_groups" in datum


def make_server(elem):
    # Attach the security_groups sub-template to a server element.
    secgrps = SecurityGroupsTemplateElement('security_groups')
    elem.append(secgrps)
    secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group',
                                        selector="security_groups")
    secgrp.set('name')


class SecurityGroupServerTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('server')
        make_server(root)
        return xmlutil.SlaveTemplate(root, 1)


class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('servers')
        elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
        make_server(elem)
        return xmlutil.SlaveTemplate(root, 1)


class Security_groups(extensions.ExtensionDescriptor):
    """Security group support."""
    name = "SecurityGroups"
    alias = "os-security-groups"
    namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
    updated = "2011-07-21T00:00:00+00:00"

    def get_controller_extensions(self):
        controller = SecurityGroupActionController()
        actions = extensions.ControllerExtension(self, 'servers', controller)
        controller = SecurityGroupsOutputController()
        output = extensions.ControllerExtension(self, 'servers', controller)
        return [actions, output]

    def get_resources(self):
        resources = []

        res = extensions.ResourceExtension('os-security-groups',
                                           controller=SecurityGroupController())

        resources.append(res)

        res = extensions.ResourceExtension('os-security-group-rules',
                                           controller=SecurityGroupRulesController())
        resources.append(res)

        res = extensions.ResourceExtension(
            'os-security-groups',
            controller=ServerSecurityGroupController(),
            parent=dict(member_name='server', collection_name='servers'))
        resources.append(res)

        return resources


class NativeSecurityGroupExceptions(object):
    # Maps driver-level failures to HTTP / nova exceptions for the
    # "native" (non-EC2) API surface.
    @staticmethod
    def raise_invalid_property(msg):
        raise exc.HTTPBadRequest(explanation=msg)

    @staticmethod
    def raise_group_already_exists(msg):
        raise exc.HTTPBadRequest(explanation=msg)

    @staticmethod
    def raise_invalid_group(msg):
        raise exc.HTTPBadRequest(explanation=msg)

    @staticmethod
    def raise_invalid_cidr(cidr, decoding_exception=None):
        raise exception.InvalidCidr(cidr=cidr)

    @staticmethod
    def raise_over_quota(msg):
        raise exception.SecurityGroupLimitExceeded(msg)

    @staticmethod
    def raise_not_found(msg):
        raise exc.HTTPNotFound(explanation=msg)


class NativeNovaSecurityGroupAPI(NativeSecurityGroupExceptions,
                                 compute_api.SecurityGroupAPI):
    pass


class NativeQuantumSecurityGroupAPI(NativeSecurityGroupExceptions,
                                    quantum_driver.SecurityGroupAPI):
    pass
# NOTE(review): Python 2 script (ConfigParser / email.MIMEText import paths).
import os
import sys
import argparse
import ConfigParser
import time
import requests
import urllib3
import json
import socket
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText

# Enable to disable all alerts
DISABLE_ALERTS = False
# Simulate a fake migration (host maintenance)
FAKE_MIGRATION = False
# Enable to disable all logs
DISABLE_LOGGING = True


class GCEMaintenanceAlerts():
    """
    Send alerts for GCE Maintenance Events.
    - Email
    - Slack
    """

    def __init__(self):
        """
        Gather configuration settings.
        """
        parser = argparse.ArgumentParser(description='Send alerts for GCE Maintenance Events.')
        # parser.add_argument('config', metavar='c', type='Path to the ini config file (default: working directory)')
        parser.add_argument('-c', '--config', help='Path to the ini config file (default: working directory)', required=False)
        args = parser.parse_args()

        # Check for config file path as argument
        if args.config:
            config_file = args.config
        else:
            # Default location is config.ini in script's directory
            config_file = '{}/config.ini'.format(os.path.dirname(os.path.realpath(__file__)))
        config = ConfigParser.ConfigParser()
        # Read ini file
        config.read(config_file)

        # Collect General config settings
        self.gce_project_name = config.get('General', 'gce_project_name')
        # Polling interval in seconds (stored as float for time.sleep()).
        self.interval = float(config.get('General', 'interval'))
        self.alert_subject = config.get('General', 'alert_subject')

        # Collect Email config settings
        self.send_email = config.get('Email', 'send_email')
        if self.send_email.lower() == 'true':
            self.send_email = True
        else:
            if DISABLE_LOGGING == False:
                print('Email alerts disabled')
            self.send_email = False
        self.email_user = config.get('Email', 'email_user')
        self.email_pass = config.get('Email', 'email_pass')
        self.email_to = config.get('Email', 'email_to')
        if ',' in self.email_to:
            # NOTE(review): spaces are stripped first, so the ', ' separator
            # here can never match -- looks like it should be split(',').
            # Verify against a multi-recipient config before changing.
            self.email_to = self.email_to.strip().replace(' ', '').split(', ')
        else:
            self.email_to = [self.email_to]
        self.smtp_host = config.get('Email', 'smtp_host')
        self.smtp_port = config.get('Email', 'smtp_port')

        # Collect Slack config settings
        self.send_slack = config.get('Slack', 'send_slack')
        if self.send_slack.lower() == 'true':
            self.send_slack = True
        else:
            if DISABLE_LOGGING == False:
                print('Slack alerts disabled')
            self.send_slack = False
        self.slack_url = config.get('Slack', 'slack_url')
        self.slack_username = config.get('Slack', 'slack_username')

        # Other config settings
        self.gce_metadata_url = 'http://metadata.google.internal/computeMetadata/v1/'
        self.gce_metadata_headers = {'Metadata-Flavor': 'Google'}
        self.gce_operations_url = 'https://console.cloud.google.com/compute/operations?project={}'.format(self.gce_project_name)
        self.gce_instances_url = 'https://console.cloud.google.com/compute/instances?project={}'.format(self.gce_project_name)
        self.alert_message = ''
        self.slack_headers = {'Content-type': 'application/json'}

    def check_maintenance_event(self, callback):
        """
        Check metadata URL for GCE Maintenance Event.
        """
        # NOTE(review): the 'callback' parameter is unused; the method calls
        # self.alert_maintenance_event() directly (see the commented-out call
        # below). Kept for interface compatibility with __main__.
        request_url = self.gce_metadata_url + 'instance/maintenance-event'
        last_maintenance_event = None
        last_etag = '0'
        hostname = socket.gethostname()

        # while True:
        if DISABLE_LOGGING == False:
            print('Making request to check for maintenance event...')
        # request = requests.get(
        #     request_url,
        #     params={
        #         'last_etag': last_etag,
        #         'wait_for_change': True
        #     },
        #     headers=self.gce_metadata_headers
        # )
        # Long-poll the metadata server; wait_for_change blocks until the
        # value changes or the server times the request out.
        http = urllib3.PoolManager(num_pools=1)
        request = http.request(
            'GET',
            request_url,
            fields={
                'last_etag': last_etag,
                'wait_for_change': True
            },
            headers=self.gce_metadata_headers
        )
        # During maintenance GCE can return a 503, so retry request
        # if request.status_code == 503:
        if request.status == 503:
            time.sleep(1)
            # continue
        # request.raise_for_status()
        # except urllib3.HTTPError as e:
        #     print 'HTTPError %r' % e
        last_etag = request.headers['ETag']
        if DISABLE_LOGGING == False:
            print('Maintenance Event: ' + request.data)
        # if request.text == 'NONE':
        # NOTE(review): urllib3 returns bytes for .data; this str comparison
        # is only valid on Python 2 -- confirm before porting to Python 3.
        if request.data == 'NONE':
            if FAKE_MIGRATION == False:
                maintenance_event = None
            else:
                # Fake a migration
                maintenance_event = 'MIGRATE_ON_HOST_MAINTENANCE'
        else:
            # Possible events:
            # MIGRATE_ON_HOST_MAINTENANCE = instance will be migrated
            # SHUTDOWN_ON_HOST_MAINTENANCE = instance will be shut down
            # maintenance_event = request.text
            maintenance_event = request.data

        # Check for which type of maintenance will be occurring
        if maintenance_event == 'MIGRATE_ON_HOST_MAINTENANCE':
            # msg['Subject'] = 'Urgent: GCE Maintenance Alert (migration): {}'.format(hostname)
            self.alert_subject = 'Urgent: GCE Maintenance Alert (migration): {}'.format(hostname)
            self.alert_message = 'Urgent: GCE Maintenance Alert for impending instance Migration in less than 60 seconds.\n\nInstance: {}'.format(hostname)
        elif maintenance_event == 'SHUTDOWN_ON_HOST_MAINTENANCE':
            # msg['Subject'] = 'Urgent: GCE Maintenance Alert (shutdown): {}'.format(hostname)
            self.alert_subject = 'Urgent: GCE Maintenance Alert (shutdown): {}'.format(hostname)
            self.alert_message = 'Urgent: GCE Maintenance Alert for impending instance Shutdown in less than 60 seconds.\n\nInstance: {}'.format(hostname)
        else:
            # msg['Subject'] = 'Urgent: GCE Maintenance Alert (unknown): {}'.format(hostname)
            self.alert_subject = 'Urgent: GCE Maintenance Alert (unknown): {}'.format(hostname)
            self.alert_message = 'Urgent: GCE Maintenance Alert for impending instance Maintenance in less than 60 seconds.\n\nInstance: {}'.format(hostname)

        # Only alert on a transition, not on every poll of the same state.
        if maintenance_event != last_maintenance_event:
            last_maintenance_event = maintenance_event
            # callback(maintenance_event)
            self.alert_maintenance_event(maintenance_event)

    def send_email_alert(self, to, subject, text):
        """
        Send Email alert.
        """
        if DISABLE_LOGGING == False:
            print('Sending Email alert...')
        text = text + '\n\nView GCE Operations Log:\n{}\n\nView GCE Instances:\n{}'.format(self.gce_operations_url, self.gce_instances_url)
        # Create message
        message = MIMEMultipart()
        message['From'] = self.email_user
        message['To'] = ', '.join(to)
        message['Subject'] = subject
        message.attach(MIMEText(text))
        # Connect to mail server
        mail_server = smtplib.SMTP(self.smtp_host, self.smtp_port)
        mail_server.ehlo()
        # Encrypt connection
        mail_server.starttls()
        mail_server.ehlo()
        # Authenticate
        mail_server.login(self.email_user, self.email_pass)
        # Send mail
        # mail_server.sendmail(self.email_user, [to], message.as_string())
        mail_server.sendmail(self.email_user, to, message.as_string())
        mail_server.close()
        if DISABLE_LOGGING == False:
            print('Sent Email alert.')

    def send_slack_alert(self, subject, text, url):
        """
        Send Slack alert.
        """
        # NOTE(review): the 'text' parameter is unused; only 'subject' is
        # posted to Slack -- confirm this is intentional.
        if DISABLE_LOGGING == False:
            print('Sending Slack alert...')
        subject = subject + '\n\n<{}|View GCE Operations Log>\n\n<{}|View GCE Instances>'.format(self.gce_operations_url, self.gce_instances_url)
        # Send Slack channel alert
        request = requests.post(
            url,
            # Make sure data is encoded as JSON
            data=json.dumps({
                # 'channel': '#general',
                'username': self.slack_username,
                'icon_emoji': ':rotating_light:',
                'text': subject
            }),
            headers=self.slack_headers
        )
        if DISABLE_LOGGING == False:
            print('Sent Slack alert.')

    def alert_maintenance_event(self, event):
        """
        Send alerts via email and Slack for GCE Maintenance Event.
        """
        if event:
            if DISABLE_LOGGING == False:
                print('Undergoing host maintenance: {}'.format(event))
            if DISABLE_ALERTS == False:
                if self.send_email == True:
                    if DISABLE_LOGGING == False:
                        print('Trigger to send Email alert')
                    # self.send_email_alert(self.email_to, self.alert_subject, event)
                    self.send_email_alert(self.email_to, self.alert_subject, self.alert_message)
                if self.send_slack == True:
                    if DISABLE_LOGGING == False:
                        print('Trigger to send Slack alert')
                    # self.send_slack_alert(self.alert_subject, event, self.slack_url)
                    self.send_slack_alert(self.alert_subject, self.alert_message, self.slack_url)
                # Prevent duplicate alerts (server will reboot in 45-60 seconds anyways)
                time.sleep(90)
                # Exits the whole process; the instance is about to migrate
                # or shut down anyway.
                sys.exit()
        else:
            if DISABLE_LOGGING == False:
                print('Finished host maintenance')


# def main():
#     GMA.check_maintenance_event(GMA.alert_maintenance_event)
#     time.sleep(float(self.interval))

if __name__ == '__main__':
    GMA = GCEMaintenanceAlerts()
    # main()
    # GMA.check_maintenance_event(GMA.alert_maintenance_event)
    # time.sleep(GMA.interval)
    while(True):
        # main()
        GMA.check_maintenance_event(GMA.alert_maintenance_event)
        time.sleep(GMA.interval)
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Beam pipelines to generate examples for the GlyphAzzn dataset."""
from absl import app
from absl import flags
import apache_beam as beam
from magenta.models.svg_vae import svg_utils
import numpy as np
from tensor2tensor.data_generators import generator_utils
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

FLAGS = flags.FLAGS
flags.DEFINE_string(
    'pipeline_options', '',
    'Command line flags to use in constructing the Beam pipeline options.')
flags.DEFINE_string(
    'raw_data_file', '/path/to/parquetio-file',
    'File where the raw data is (in parquetio format).')
flags.DEFINE_string(
    'final_data_file', '/path/to/final-dataset-train',
    'File where the final data will be saved (in tfrecord format).')
flags.DEFINE_string(
    'final_stats_file', '/path/to/final-dataset-stats',
    'File where the final data stats will be saved (in tfrecord format).')

# pylint: disable=expression-not-assigned
# pylint: disable=abstract-method
# pylint: disable=arguments-differ


################## HELPERS FOR DATASET PROCESSING ###################
def _is_valid_glyph(g):
  """Returns True for glyphs that are 0-9/A-Z/a-z with nonzero dimensions."""
  is_09 = 48 <= g['uni'] <= 57
  is_capital_az = 65 <= g['uni'] <= 90
  is_az = 97 <= g['uni'] <= 122
  is_valid_dims = g['width'] != 0 and g['vwidth'] != 0
  return (is_09 or is_capital_az or is_az) and is_valid_dims


def _is_valid_path(pathunibfp):
  """Keeps only non-empty paths with at most 50 commands (pre-EOS limit)."""
  return pathunibfp[0] and len(pathunibfp[0]) <= 50


def _convert_to_path(g):
  """Converts SplineSet in SFD font to str path."""
  path = svg_utils.sfd_to_path_list(g)
  path = svg_utils.add_missing_cmds(path, remove_zs=False)
  # Normalize to the glyph's own width/vwidth viewbox.
  path = svg_utils.normalize_based_on_viewbox(
      path, '0 0 {} {}'.format(g['width'], g['vwidth']))
  return path, g['uni'], g['binary_fp']


def _create_example(pathuni):
  """Bulk of dataset processing. Converts str path to serialized tf.Example."""
  path, uni, binary_fp = pathuni
  final = {}

  # zoom out
  path = svg_utils.zoom_out(path)
  # make clockwise
  path = svg_utils.canonicalize(path)

  # render path for training
  final['rendered'] = svg_utils.per_step_render(path, absolute=True)

  # make path relative
  path = svg_utils.make_relative(path)
  # convert to vector
  vector = svg_utils.path_to_vector(path, categorical=True)
  # make simple vector
  vector = np.array(vector)
  # Keep the 4 command-type one-hot columns plus the last 6 argument columns.
  vector = np.concatenate(
      [np.take(vector, [0, 4, 5, 9], axis=-1), vector[..., -6:]], axis=-1)

  # count some stats
  final['seq_len'] = np.shape(vector)[0]
  final['class'] = int(svg_utils.map_uni_to_alphanum(uni))
  final['binary_fp'] = str(binary_fp)

  # append eos
  vector = svg_utils.append_eos(vector.tolist(), True, 10)

  # pad path to 51 (with eos)
  final['sequence'] = np.concatenate(
      (vector, np.zeros(((50 - final['seq_len']), 10))), 0)

  # make pure list:
  final['rendered'] = np.reshape(final['rendered'][..., 0],
                                 [64*64]).astype(np.float32).tolist()
  final['sequence'] = np.reshape(final['sequence'],
                                 [51*10]).astype(np.float32).tolist()
  final['class'] = np.reshape(final['class'],
                              [1]).astype(np.int64).tolist()
  final['seq_len'] = np.reshape(final['seq_len'],
                                [1]).astype(np.int64).tolist()

  return generator_utils.to_example(final).SerializeToString()


def _decode_tfexample(serialized_example):
  """Decodes saved, serialized tfrecord example."""
  eg = tf.train.Example.FromString(serialized_example)
  return {
      # add [0] after "value" if you want to just get value "1" instead of ["1"]
      'class': np.reshape(eg.features.feature['class'].int64_list.value,
                          [1]).astype(np.int64).tolist(),
      'seq_len': eg.features.feature['seq_len'].int64_list.value[0],
      'sequence': np.reshape(eg.features.feature['sequence'].float_list.value,
                             [51*10]).astype(np.float32).tolist(),
      'rendered': np.reshape(eg.features.feature['rendered'].float_list.value,
                             [64*64]).astype(np.float32).tolist(),
  }


def _mean_to_example(mean_stdev):
  """Converts the found mean and stdev to tfrecords example."""
  # mean_stdev is a dict
  mean_stdev['mean'] = np.reshape(mean_stdev['mean'],
                                  [10]).astype(np.float32).tolist()
  mean_stdev['variance'] = np.reshape(mean_stdev['variance'],
                                      [10]).astype(np.float32).tolist()
  mean_stdev['stddev'] = np.reshape(mean_stdev['stddev'],
                                    [10]).astype(np.float32).tolist()
  mean_stdev['count'] = np.reshape(mean_stdev['count'],
                                   [1]).astype(np.int64).tolist()
  return generator_utils.to_example(mean_stdev)


class MeanStddev(beam.CombineFn):
  """Apache Beam accumulator to compute the mean/stdev of svg commands."""

  def create_accumulator(self):
    curr_sum = np.zeros([10])
    sum_sq = np.zeros([10])
    return (curr_sum, sum_sq, 0)  # x, x^2, count

  def add_input(self, sum_count, new_input):
    (curr_sum, sum_sq, count) = sum_count
    # new_input is a dict with keys = ['seq_len', 'sequence']
    new_seq_len = new_input['seq_len']

    # remove padding and eos from sequence
    new_input = np.reshape(np.array(new_input['sequence']), [-1, 10])
    new_input = new_input[:new_seq_len, :]

    # accumulate new_sum and new_sum_sq
    new_sum = np.sum([curr_sum, np.sum(new_input, axis=0)], axis=0)
    new_sum_sq = np.sum([sum_sq, np.sum(np.power(new_input, 2), axis=0)],
                        axis=0)
    return new_sum, new_sum_sq, count + new_seq_len

  def merge_accumulators(self, accumulators):
    curr_sums, sum_sqs, counts = list(zip(*accumulators))
    return np.sum(curr_sums, axis=0), np.sum(sum_sqs, axis=0), np.sum(counts)

  def extract_output(self, sum_count):
    (curr_sum, curr_sum_sq, count) = sum_count
    if count:
      mean = np.divide(curr_sum, count)
      variance = np.divide(curr_sum_sq, count) - np.power(mean, 2)
      # -ve value could happen due to rounding
      variance = np.max([variance, np.zeros(np.shape(variance))], axis=0)
      stddev = np.sqrt(variance)
      return {
          'mean': mean,
          'variance': variance,
          'stddev': stddev,
          'count': count
      }
    else:
      return {
          'mean': float('NaN'),
          'variance': float('NaN'),
          'stddev': float('NaN'),
          'count': 0
      }


########################## PIPELINE GENERATORS ##########################
def create_glyphazzn_dataset(filepattern, output_path):
  """Creates a glyphazzn dataset, from raw Parquetio to TFRecords."""
  def pipeline(root):
    """Pipeline for creating glyphazzn dataset."""
    attrs = ['uni', 'width', 'vwidth', 'sfd', 'id', 'binary_fp']

    examples = root | 'Read' >> beam.io.parquetio.ReadFromParquet(
        file_pattern=filepattern, columns=attrs)

    examples = examples | 'FilterBadIcons' >> beam.Filter(_is_valid_glyph)
    examples = examples | 'ConvertToPath' >> beam.Map(_convert_to_path)
    examples = examples | 'FilterBadPathLenghts' >> beam.Filter(_is_valid_path)
    examples = examples | 'ProcessAndConvert' >> beam.Map(_create_example)
    (examples | 'WriteToTFRecord' >> beam.io.tfrecordio.WriteToTFRecord(
        output_path, num_shards=90))
  return pipeline


def get_stats_of_glyphazzn(filepattern, output_path):
  """Computes the Mean and Std across examples in glyphazzn dataset."""
  def pipeline(root):
    """Pipeline for computing means/std from dataset."""
    examples = root | 'Read' >> beam.io.tfrecordio.ReadFromTFRecord(filepattern)
    examples = examples | 'Deserialize' >> beam.Map(_decode_tfexample)
    examples = examples | 'GetMeanStdev' >> beam.CombineGlobally(MeanStddev())
    examples = examples | 'MeanStdevToSerializedTFRecord' >> beam.Map(
        _mean_to_example)
    # BUGFIX: beam.coders.ProtoCode does not exist -- the proto coder class
    # is beam.coders.ProtoCoder; the old name raised AttributeError at
    # pipeline-construction time.
    (examples | 'WriteToTFRecord' >> beam.io.tfrecordio.WriteToTFRecord(
        output_path, coder=beam.coders.ProtoCoder(tf.train.Example)))
  return pipeline


def main(_):
  """Runs the dataset-creation pipeline, then the stats pipeline."""
  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      FLAGS.pipeline_options.split(','))

  pipeline = create_glyphazzn_dataset(
      FLAGS.raw_data_file + '*',
      FLAGS.final_data_file)
  with beam.Pipeline(options=pipeline_options) as root:
    pipeline(root)

  pipeline = get_stats_of_glyphazzn(
      FLAGS.final_data_file + '*',
      FLAGS.final_stats_file)
  with beam.Pipeline(options=pipeline_options) as root:
    pipeline(root)


if __name__ == '__main__':
  app.run(main)
# coding: utf-8

"""
Tests of the isentropic package.

Expected values are reference-table constants; presumably transcribed from
the cited NACA report and Anderson -- verify against those sources before
editing any number.

References
----------
1. NACA-TR-1135 http://hdl.handle.net/2060/19930091059
2. Anderson, J.D.: "Modern compressible flow", 3rd edition.

"""
from __future__ import division, absolute_import

import numpy as np
import numpy.testing
import pytest

from skaero.gasdynamics import isentropic


def test_mach_angle():
    # Mach angle mu(M) for a set of supersonic Mach numbers; inf -> 0 deg.
    M_list = [1.1, 1.38, 2.05, 3.0, np.inf]
    mu_list = [
        65.38,
        46.44,
        29.20,
        19.47,
        0.0
    ]
    expected_mach_angles = [np.radians(val) for val in mu_list]
    mach_angles = [isentropic.mach_angle(M) for M in M_list]
    np.testing.assert_array_almost_equal(mach_angles, expected_mach_angles,
                                         decimal=3)


def test_mach_angle_raises_error_when_mach_is_subsonic():
    # Mach angle is undefined below M=1.
    with pytest.raises(ValueError) as excinfo:
        isentropic.mach_angle(0.8)
    assert excinfo.exconly().startswith("ValueError: "
                                        "Mach number must be supersonic")


def test_PrandtlMeyerExpansion_angle():
    # Prandtl-Meyer function nu(M); nu(inf) -> 130.45 deg for gamma=1.4.
    M_list = [1.2, 1.4, 2.6, 3.2, np.inf]
    nu_list = [
        3.558,
        8.987,
        41.41,
        53.47,
        130.45
    ]
    expected_angles = [np.radians(val) for val in nu_list]
    turn_angles = [
        isentropic.PrandtlMeyerExpansion.nu(M)
        for M in M_list]
    np.testing.assert_array_almost_equal(turn_angles, expected_angles,
                                         decimal=3)


def test_default_gamma_for_new_IsentropicFlow():
    _ = 1.0  # Unused value, equals or bigger than one
    pm = isentropic.PrandtlMeyerExpansion(_, _)
    assert pm.fl.gamma == 1.4


def test_PrandtlMeyerExpansion_example():
    # Example 4.13 from Anderson. Default gamma=1.4 used
    fl = isentropic.IsentropicFlow()
    pm = isentropic.PrandtlMeyerExpansion(M_1=1.5,
                                          theta=np.radians(20),
                                          fl=fl)
    np.testing.assert_almost_equal(pm.M_2, 2.207, decimal=3)
    np.testing.assert_almost_equal(pm.p2_p1, 0.340, decimal=3)
    np.testing.assert_almost_equal(pm.T2_T1, 0.735, decimal=3)
    np.testing.assert_almost_equal(pm.mu_1, np.radians(41.81), decimal=3)
    np.testing.assert_almost_equal(pm.mu_2, np.radians(26.95), decimal=3)


def test_PrandtlMeyerExpansion_raises_error_when_deflection_angle_is_over_the_maximum_and_mach_is_supersonic():
    mach = 3.0
    wrong_angle = np.radians(125)
    with pytest.raises(ValueError) as excinfo:
        isentropic.PrandtlMeyerExpansion(mach, wrong_angle)
    assert excinfo.exconly().startswith("ValueError: Deflection angle must "
                                        "be lower than maximum")


def test_PrandtlMeyerExpansion_raises_error_when_Mach_is_subsonic():
    wrong_mach = 0.9
    with pytest.raises(ValueError) as excinfo:
        isentropic.PrandtlMeyerExpansion.nu(wrong_mach)
    assert excinfo.exconly().startswith("ValueError: Mach number must "
                                        "be supersonic")


def test_isentropic_flow_has_the_gamma_indicated_in_constructor():
    gamma = 1.4
    flow = isentropic.IsentropicFlow(gamma)
    np.testing.assert_almost_equal(flow.gamma, gamma, decimal=3)


def test_pressure_ratio():
    # Static-to-stagnation pressure ratio p/p0 for gamma=1.4.
    fl = isentropic.IsentropicFlow(1.4)
    M_list = [0.0, 0.27, 0.89, 1.0, 1.30, 2.05]
    expected_pressure_ratios = [
        1.0,
        0.9506,
        0.5977,
        0.5283,
        0.3609,
        0.1182
    ]
    np.testing.assert_array_almost_equal(
        fl.p_p0(M_list), expected_pressure_ratios, decimal=4
    )


def test_area_ratio():
    # A/A* has a minimum of 1.0 at M=1 and diverges at M=0.
    fl = isentropic.IsentropicFlow(1.4)
    M_list = [0.0, 0.38, 0.79, 1.0, 1.24, 2.14]
    expected_area_ratios = [
        np.infty,
        1.6587,
        1.0425,
        1.0,
        1.043,
        1.902
    ]
    np.testing.assert_array_almost_equal(
        fl.A_Astar(M_list), expected_area_ratios, decimal=3
    )


def test_area_ratio_no_zero_division_error():
    fl = isentropic.IsentropicFlow()
    assert np.isposinf(fl.A_Astar(0))


def test_mach_from_area_ratio_raises_error_when_ratio_is_subsonic():
    # A/A* < 1 has no physical solution.
    with pytest.raises(ValueError):
        isentropic.mach_from_area_ratio(0.9)


def test_speed_of_sound_ratio():
    fl = isentropic.IsentropicFlow(1.4)
    M_list = [0.0, 0.3, 1.0, 1.3, 2.5]
    expected_sound_speed_ratios = [1.0, 0.99112, 0.91287, 0.86451, 0.6667]
    np.testing.assert_array_almost_equal(
        fl.a_a0(M_list), expected_sound_speed_ratios, decimal=3
    )


def test_mach_from_area_ratio_subsonic():
    fl = isentropic.IsentropicFlow(1.4)
    # TODO: further investigation required in np.inf
    A_Astar_list = [
        # np.inf,
        2.4027,
        1.7780,
        1.0382,
        1.0,
    ]
    expected_ratios = [
        # 0.0,
        0.25,
        0.35,
        0.8,
        1.0
    ]
    # [0] selects the subsonic branch of the inverse.
    mach_from_area_ratios = [
        isentropic.mach_from_area_ratio(A_Astar, fl)[0]  # Subsonic
        for A_Astar in A_Astar_list]
    np.testing.assert_array_almost_equal(
        mach_from_area_ratios, expected_ratios, decimal=3
    )


def test_mach_from_area_ratio_supersonic():
    fl = isentropic.IsentropicFlow(1.4)
    A_Astar_list = [
        1.0,
        1.043,
        1.328,
        1.902,
        4.441
    ]
    expected_ratios = [
        1.0,
        1.24,
        1.69,
        2.14,
        3.05
    ]
    # [1] selects the supersonic branch of the inverse.
    mach_from_area_ratios = [
        isentropic.mach_from_area_ratio(A_Astar, fl)[1]  # Supersonic
        for A_Astar in A_Astar_list]
    np.testing.assert_array_almost_equal(
        mach_from_area_ratios, expected_ratios, decimal=2
    )


def test_density_ratio():
    fl = isentropic.IsentropicFlow(1.4)
    M_list = [0.0, 0.27, 0.89, 1.0, 1.30, 2.05]
    expected_density_ratios = [1.0, 0.96446008, 0.69236464, 0.63393815,
                               0.48290279, 0.21760078]
    density_ratios = fl.rho_rho0(M_list)
    np.testing.assert_array_almost_equal(
        density_ratios, expected_density_ratios, decimal=4
    )
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 19:44:45 2017

@author: login
"""
# NOTE(review): Python 2 only -- print statements, dict.iteritems(), xrange,
# cPickle and the long-removed pandas .ix indexer are used throughout.
from collections import OrderedDict
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
from scipy.io import loadmat
import shutil, sys, os
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import cPickle as pickle


def combine_similar_procs(mod_df, obs_df):
    # Collapse the per-electron-acceptor oxidation columns into single
    # totals so model columns line up with the observation columns.
    mod_df2, obs_df2 = mod_df.copy(), obs_df.copy()
    mod_df2.loc[:, 'Methane Oxidation'] = mod_df2.loc[:, 'Methane Oxidation (nitrate)'] + \
                                          mod_df2.loc[:, 'Methane Oxidation (oxygen)'] + \
                                          mod_df2.loc[:, 'Methane Oxidation (sulfate)']
    mod_df2.loc[:, 'Sulfur Oxidation'] = mod_df2.loc[:, 'Sulfur Oxidation (nitrate)'] + \
                                         mod_df2.loc[:, 'Sulfur Oxidation (oxygen)']
    # drop all model data that is not scorable
    grp1 = set(obs_df2.columns)
    grp2 = set(mod_df2.columns)
    odd_cols = list(grp1.symmetric_difference(grp2))
    mod_df2.drop(odd_cols, axis=1, inplace=True)
    return mod_df2


def recreate_opt_sequence(path, opt_opt):
    # Replay the pickled optimization checkpoints in order and recover
    # which variable was removed (fixed) at each step.
    if opt_opt == 'optimum':
        opt_type = 'new_model'
    else:
        opt_type = 'old_model'
    starting_point = set(load_optimization_var_list(opt_type))
    pickle_files = sorted([i for i in os.listdir(path) if i.endswith(".p")])
    pickle_paths = [os.path.join(path, i) for i in pickle_files]
    var_trace = []
    for pp in pickle_paths:
        with open( pp, "rb" ) as pickle_h:
            pickle_pack = pickle.load(pickle_h)
        to_optimize = pickle_pack[1]
        # Exactly one variable must have been dropped per checkpoint.
        dropped = starting_point.symmetric_difference(set(to_optimize))
        assert len(dropped) == 1
        var_trace.append(dropped)
        starting_point.remove(list(dropped)[0])
    return var_trace


def load_optimization_var_list(opt_option_str):
    """
    If 'old_model' is specified, only parameters available in the old model
    can be loaded for optimization, but if the 'new_model' is specified,
    then the additional precipitation and mass action rate constant can be
    optimized.
    """
    if opt_option_str == 'old_model':
        to_optimize = [('nitrogen_ratio'),
                       ('carbon_source_from_5e4'),
                       ('diffusion_constant'),
                       ('primary_ox_rate_const'),
                       ('C'),
                       ('N-'),
                       ('CH4'),
                       ('S-'),
                       ('nitrogen_source'),
                       ('oxygen_source'),
                       ('fe_precipitation'),
                       ('carbon_precip'),
                       ('ma_op_fe_n_rate_const') ]
    elif opt_option_str == 'new_model':
        to_optimize = [('ma_op_s_n_rate_const'),
                       ('nitrogen_ratio'),
                       ('carbon_source_from_5e4'),
                       ('diffusion_constant'),
                       ('primary_ox_rate_const'),
                       ('C'),
                       ('ma_op_fe_n_rate_const'),
                       ('N-'),
                       ('CH4'),
                       ('sminus_precipitation'),
                       ('ma_op_ch4_n_rate_const'),
                       ('S-'),
                       ('nitrogen_source'),
                       ('oxygen_source'),
                       ('fe_precipitation'),
                       ('carbon_precip')]
    else:
        sys.exit("illegal optimization option specified on command line")
    return to_optimize


def importratesandconcs_mod(path_, type_str=None):
    """
    1. Create a date index for the new dataframes
    2. Create new dictionaries to hold the new dataframes
    3. Unload each DF one at a time
    4. Interpolate each depth vector along new axis
    5. Load into new numpy array
    6. Assign date index & numpy array to new dataframe object
    7. Reload new dataframe into new dictionary, accessible by name string
    8. Return newly minted dictionaries
    """
    # Index -> species/process name maps for the MATLAB output arrays.
    conc_idxs = OrderedDict()
    rate_idxs = OrderedDict()
    conc_idxs[0] = "O"
    conc_idxs[1] = "C"
    conc_idxs[2] = "N+"
    conc_idxs[3] = "N-"
    conc_idxs[4] = "S+"
    conc_idxs[5] = "S-"
    conc_idxs[6] = "Fe+"
    conc_idxs[7] = "Fe-"
    conc_idxs[8] = "CH4"
    conc_idxs[9] = "Null"
    rate_idxs[0] = "Iron Oxidation (oxygen)"
    rate_idxs[1] = "Ammonia Oxidation (oxygen)"
    rate_idxs[2] = "Sulfur Oxidation (oxygen)"
    rate_idxs[3] = "Iron Oxidation (nitrate)"
    rate_idxs[4] = "Sulfur Oxidation (nitrate)"
    rate_idxs[5] = "Methane Oxidation (oxygen)"
    rate_idxs[6] = "Methane Oxidation (nitrate)"
    rate_idxs[7] = "Methane Oxidation (sulfate)"
    rate_idxs[8] = "Aerobic Heterotrophy"
    rate_idxs[9] = "Denitrification"
    rate_idxs[10] = "Iron Reduction"
    rate_idxs[11] = "Sulfate Reduction"
    rate_idxs[12] = "Methanogenesis"

    # Missing or malformed model output falls back to all-zero arrays so the
    # caller still gets a (bad) scoreable result instead of a crash.
    if os.path.exists(path_):
        mat = loadmat(path_)
        concs_ = mat['concs_history']
        rates_ = mat['rates_history']
    else:
        concs_ = np.zeros((100, 17, 10))
        rates_ = np.zeros((100, 17, 13))

    if concs_.shape[0] != 100:
        concs_ = np.zeros((100, 17, 10))
        rates_ = np.zeros((100, 17, 13))

    conc_idxs_inv = {v: k for k, v in conc_idxs.iteritems()}
    rate_idxs_inv = {v: k for k, v in rate_idxs.iteritems()}
    mat_dict = {}
    inputs = [concs_, rates_]
    translators = [conc_idxs_inv, rate_idxs_inv]
    for i_arr, t_dict in zip(inputs, translators):
        for name, idx_z in t_dict.items():
            # Rows are depths 6..22 m; columns are the 100 model time slices.
            mat_dict[name] = pd.DataFrame(data=i_arr[:, :, idx_z].T,
                                          columns=range(0,100),
                                          index=range(6,23))
    n_days = 146
    start_date, end_date = '03/23/2013', '08/15/2013'
    dr = pd.date_range(start_date, end_date)
    assert len(dr) == n_days
    new_mat_dict = {}
    for a_spec in mat_dict.keys():
        this_df = mat_dict[a_spec]
        depths, n_slices = this_df.shape
        assert n_slices < n_days
        idx = np.arange(n_slices)
        new_interval = max(idx) / float(n_days)
        new_columns = np.arange(idx.min(), idx.max(), new_interval)
        new_df_data = np.zeros((depths, len(new_columns)))
        for depth in xrange(depths):
            a_vector = this_df.ix[depth+6, :].values
            # Cubic interpolation from model time slices onto daily steps.
            # NOTE(review): interp1d is imported below copyDirectory();
            # resolved at call time, so this works but is fragile.
            f2 = interp1d(idx, a_vector, kind='cubic')
            new_df_data[depth, :] = f2(new_columns)
        new_df = pd.DataFrame(data=new_df_data.T,
                              columns=np.arange(6,6+depths),
                              index=dr)
        if type_str and type_str == 'full df':
            new_mat_dict[a_spec] = new_df.T.unstack()
        else:
            new_mat_dict[a_spec] = new_df.T
    all_cols = sorted(new_mat_dict.keys())
    if type_str and type_str == 'full df':
        # One wide frame: (depth, date) multi-index rows x species columns.
        full_idx = new_mat_dict[all_cols[0]].index
        full_df = pd.DataFrame(index=full_idx, columns=all_cols)
        for name in all_cols:
            full_df.ix[:, name] = new_mat_dict[name]
        return full_df
    else:
        return new_mat_dict


def copyDirectory(src, dest):
    # Copy the MATLAB model directory; exit hard on any copy failure.
    try:
        shutil.copytree(src, dest)
    except shutil.Error as e:
        print "error: %s" % e
        sys.exit("do")
    except OSError as e:
        print "error: %s" % e
        sys.exit("rae")

from scipy.interpolate import interp1d

def turn_mat_into_single_df(mat):
    """Interpolate selected concentration slices of a raw .mat array into one
    wide daily DataFrame (same resampling scheme as importratesandconcs_mod)."""
    columns = ['nitrate', 'sulfate', 'iron(II)', 'oxygen' ]
    z_idxs = [2, 4, 7, 0 ]
    mat_dict = {}
    for idx, col in zip(z_idxs, columns):
        mat_dict[col] = pd.DataFrame(data=mat[:,:,idx].T,
                                     columns=range(0,100),
                                     index=range(6,23))
    n_days = 146
    start_date, end_date = '03/23/2013', '08/15/2013'
    dr = pd.date_range(start_date, end_date)
    assert len(dr) == n_days
    new_mat_dict = {}
    for a_spec in mat_dict.keys():
        this_df = mat_dict[a_spec]
        depths, n_slices = this_df.shape
        assert n_slices < n_days
        idx = np.arange(n_slices)
        new_interval = max(idx) / float(n_days)
        new_columns = np.arange(idx.min(), idx.max(), new_interval)
        new_df_data = np.zeros((depths, len(new_columns)))
        for depth in xrange(depths):
            a_vector = this_df.ix[depth+6, :].values
            f2 = interp1d(idx, a_vector, kind='cubic')
            new_df_data[depth, :] = f2(new_columns)
        new_df = pd.DataFrame(data=new_df_data.T,
                              columns=np.arange(6,6+depths),
                              index=dr)
        new_mat_dict[a_spec] = new_df.T.unstack()
    all_cols = sorted(new_mat_dict.keys())
    full_idx = new_mat_dict[all_cols[0]].index
    full_df = pd.DataFrame(index=full_idx, columns=all_cols)
    for name in all_cols:
        full_df.ix[:, name] = new_mat_dict[name]
    return full_df


def standard_scale_df(df):
    """Z-score a DataFrame (column-wise) or a Series; exits on other types."""
    sklearn_ss = StandardScaler()
    if type(df) == type(pd.DataFrame()):
        std_data = sklearn_ss.fit_transform(df.values)
        return pd.DataFrame(data=std_data, index=df.index, columns=df.columns)
    elif type(df) == type(pd.Series()):
        std_data = sklearn_ss.fit_transform(df.values.reshape(-1, 1))
        return pd.Series(data=std_data.flatten(), index=df.index)
    else:
        sys.exit("Unrecognized data type for scaling")


def min_max_scale_df(df):
    """Min-max scale a DataFrame (column-wise) or a Series; exits otherwise."""
    sklearn_ss = MinMaxScaler()
    if type(df) == type(pd.DataFrame()):
        std_data = sklearn_ss.fit_transform(df.values)
        return pd.DataFrame(data=std_data, index=df.index, columns=df.columns)
    elif type(df) == type(pd.Series()):
        std_data = sklearn_ss.fit_transform(df.values.reshape(-1, 1))
        return pd.Series(data=std_data.flatten(), index=df.index)
    else:
        sys.exit("Unrecognized data type for scaling")


def score_results(obs_df_, data_df_, score_type):
    """Score model output against observations as a sum of per-column R^2
    values (each clipped below at -1)."""
    # temporal subsetting
    obs_df, data_df = obs_df_.copy(), data_df_.copy()
    bool1 = data_df.index.isin(obs_df.index)
    bool2 = obs_df.index.isin(data_df.index)
    sub_data_df = data_df[bool1]
    sub_obs_df = obs_df[bool2]
    sub_data_df = combine_similar_procs(sub_data_df, sub_obs_df)
    # drop all columns that aren't related to the specific objective
    if score_type == 'gene_objective':
        pass
    elif score_type == 'conc_objective':
        to_keep = set(['Fe-', 'N+', 'S+', 'O'])
        all_cols = set(sub_obs_df.columns)
        to_drop = to_keep.symmetric_difference(all_cols)
        sub_data_df.drop(to_drop, axis=1, inplace=True)
        sub_obs_df.drop(to_drop, axis=1, inplace=True)
    r2_array = np.zeros((len(sub_obs_df.columns),))
    for idx, col_ in enumerate(sub_obs_df.columns):
        # Align non-null observations and model values on shared dates.
        obs_vec = sub_obs_df.ix[:, col_].dropna()
        mod_vec = sub_data_df.ix[:, col_].dropna()
        bool_11 = obs_vec.index.isin(mod_vec.index)
        bool_22 = mod_vec.index.isin(obs_vec.index)
        obs_vec_nn = obs_vec[bool_11]
        mod_vec_nn = mod_vec[bool_22]
        # Standardize both series before computing R^2.
        obs_vec_std = standard_scale_df(obs_vec_nn)
        data_vec_std = standard_scale_df(mod_vec_nn)
        obs_vals = obs_vec_std.values
        model_vals = data_vec_std.values
        r2_array[idx] = r2_score(model_vals, obs_vals)
        print "{}: {}".format(col_, r2_array[idx])
    # Clip pathological fits so one bad column cannot dominate the score.
    r2_array[r2_array < -1.] = -1.
    print r2_array.sum()
    return r2_array.sum()

import subprocess as sp
import platform

def run_model(arg_tuple):
    """Copy the MATLAB lake model into place, run it with the given parameter
    set, score the output against observations, and clean up.

    arg_tuple = (parameter sub-DataFrame, output file path, observations,
    score type); returns the summed R^2 score."""
    subdf, out_f, obs_data, score_type = arg_tuple
    print os.path.basename(out_f)
    if platform.system() == 'Linux':
        run_cmd = ""
    else:
        run_cmd = "/Applications/MATLAB_R2016b.app/bin/"
    # tell subprocess where and what to execute
    model_loc = os.path.dirname(out_f)
    source_dir = os.path.join(os.getcwd(), 'lake_model')
    copyDirectory(source_dir, model_loc)
    input_args_loc = os.path.join(model_loc, 'baseline.txt')
    # write out the parameter set into the right location
    subdf.T.to_csv(input_args_loc, header=False, index=False, float_format='%g')
    init_val_f = os.path.join(model_loc, "concs0.txt")
    apply_conc_multiplier(subdf, init_val_f)
    #matlab -nodisplay -nojvm -nosplash -nodesktop -r 'calibration_kaw; exit'
    run_cmd = run_cmd +"matlab -nodisplay -nojvm -nosplash -nodesktop "
    run_cmd = run_cmd +"-r 'calibration_kaw; exit'"
    # what is the output file name ?
    output_loc = os.path.join(model_loc, 'outFile.txt')
    with open(output_loc, 'w') as out_h:
        out_h.write(out_f)
    # run the model
    p = sp.Popen(run_cmd, cwd=model_loc, shell=True, stderr=sp.PIPE,
                 stdout=sp.PIPE)
    stdout, stderr = p.communicate()
    # pull results & return them to memory
    results_df = importratesandconcs_mod(out_f, 'full df')
    r2 = score_results(obs_data, results_df, score_type)
    shutil.rmtree(model_loc)
    return r2


def param_lims():
    """Lower/upper sampling bounds for each optimizable model parameter."""
    limits = {'carbon_source_from_5e4': (1e4, 1e5),
              'sminus_precipitation': (0, 0.4),
              'ma_op_s_n_rate_const': (0, 0.5),
              'ma_op_ch4_n_rate_const': (0, 100),
              'S-': (0,1.36),
              'nitrogen_source': (0, 193),
              'nitrogen_ratio': (0, 0.1),
              'oxygen_source': (6.6e2, 6.6e4),
              'methane_source': (0, 8e5),
              'fe_precipitation': (0, 0.4),
              'carbon_precip': (0, 0.4),
              'diffusion_constant': (5, 158),
              'ma_op_fe_n_rate_const': (0.6, 3),
              'primary_ox_rate_const': (3.0e-5,3.0),
              'C':(0, 2.8),
              'N-': (0, 8.24),
              'CH4': (0, 10.)}
    return limits


def fill_param_dict(params, init_bool, n_samplings, zero_pct, to_optimize,
                    stds):
    """Build an (n_samplings x n_params) DataFrame of parameter draws.

    Optimized keys are drawn uniformly within limits on the first round
    (init_bool) and normally around their current mean afterwards; a
    zero_pct fraction of rows is zeroed (except 't_max')."""
    limits_ = param_lims()
    n_zeros = int(n_samplings*zero_pct)
    n_non_zero = n_samplings - n_zeros
    parameter_mat = np.zeros( (n_samplings, len(params.keys())))
    parameter_df = pd.DataFrame(index=np.arange(1,n_samplings+1),
                                columns=params.keys(),
                                data=parameter_mat)
    for idx, key in enumerate(params.keys()):
        # start at the defaults
        this_mean = params[key]
        # only randomize for paramters that need optimization
        if key in to_optimize:
            if init_bool:
                low_, high_ = limits_[key]
                this_sample = abs(np.random.uniform(low_, high_,
                                                    (n_non_zero,1)))
            else:
                this_std = stds[key]
                # eps keeps the normal draw valid when the std collapses to 0.
                this_sample = abs(np.random.normal(this_mean,
                                                   abs(this_std)+np.finfo(float).eps,
                                                   (n_non_zero,1)))
        else:
            this_sample = np.ones((n_non_zero,1))*this_mean
        if key != 't_max':
            this_sample = np.vstack((this_sample, np.zeros((n_zeros,1))))
        else:
            this_sample = np.vstack((this_sample,
                                     np.ones((n_zeros,1))*this_mean ))
        np.random.shuffle(this_sample)
        # create dataframe
        parameter_df.ix[:, key] = this_sample
    return parameter_df


def best_val(param_df, variable):
    """Return `variable`'s value from the highest-scoring row of param_df."""
    best_score = param_df.score.max()
    best_bool = param_df.score == best_score
    best_idx = param_df[best_bool].index
    return param_df.ix[best_idx, variable].values[0]


def apply_conc_multiplier(param_subdf, f_name):
    """Scale selected initial-concentration columns of concs0.txt in place
    by the corresponding multipliers in the parameter row."""
    columns = ["S-", "C", "N-", "CH4"]
    col_no = [5, 1, 3, 8]
    multiplier = [param_subdf[i] for i in columns]
    conc0 = pd.read_csv(f_name, header=None)
    for c_num, mult in zip(col_no, multiplier):
        conc0.ix[:, c_num] *= mult
    conc0.to_csv(f_name, float_format="%g", header=False, index=False)
    return None


def load_param_dict(type_str):
    """
    This function loads and ordered dict containing the parameters supplied
    to the lake model, to be written out into a csv in the specified order.
    The options include 'midpoint' which starts the optmization at the
    midpoint of the proscribed limits for all parameters, 'default', which
    loads the default parameters supplied by Sarah at the start of all of
    this, or 'midpoint-default' which starts the optimization at the
    midpoints of all the parameters, but turns off all the new precipitation
    & mass action rate constants that were added based on the analysis of
    the metagenomic model
    """
    params = OrderedDict()
    if type_str == 'midpoint':
        params['oxygen_bubble_rate'] = 0.0
        params['nitrogen_source'] = 96.5
        params['nitrogen_ratio'] = 0.05
        params['carbon_source_from_5e4'] = 5.5e4
        params['oxygen_source'] = 6.6e3
        params['methane_source'] = 2830.0
        params['t_max'] = 0.4
        params['sminus_precipitation'] = 0.2
        params['fe_precipitation'] = 0.2
        params['carbon_precip'] = 0.2
        params['diffusion_constant'] = 50
        params['ma_op_o_fe_rate_const'] = 10
        params['ma_op_o_n_rate_const'] = 5.0
        params['ma_op_o_s_rate_const'] = 0.16
        params['ma_op_fe_n_rate_const'] = 1.0
        params['ma_op_s_n_rate_const'] = 0.25
        params['ma_op_ch4_o_rate_const'] = 1e4
        params['ma_op_ch4_n_rate_const'] = 50.0
        params['ma_op_ch4_s_rate_const'] = 0.01
        params['primary_ox_rate_const'] = 1.5
        params['c_lim_o'] = 20.0
        params['c_lim_n'] = 5.0
        params['c_lim_fe'] =
0.1 params['c_lim_s'] = 30 params['C'] = 1. params['N-'] = 1. params['S-'] = 1. params['CH4'] = 1. elif type_str == 'default': params['oxygen_bubble_rate'] = 0.0 params['nitrogen_source'] = 0.0 params['nitrogen_ratio'] = 0.1 params['carbon_source_from_5e4'] = 9.4e4 params['oxygen_source'] = 6.6e3 params['methane_source'] = 2830.0 params['t_max'] = 0.4 params['sminus_precipitation'] = 0. params['fe_precipitation'] = 0.3 params['carbon_precip'] = 0.3 params['diffusion_constant'] = 50 params['ma_op_o_fe_rate_const'] = 10 params['ma_op_o_n_rate_const'] = 5.0 params['ma_op_o_s_rate_const'] = 0.16 params['ma_op_fe_n_rate_const'] = 1.0 params['ma_op_s_n_rate_const'] = 0. params['ma_op_ch4_o_rate_const'] = 1e4 params['ma_op_ch4_n_rate_const'] = 0. params['ma_op_ch4_s_rate_const'] = 0.01 params['primary_ox_rate_const'] = 1. params['c_lim_o'] = 20.0 params['c_lim_n'] = 5.0 params['c_lim_fe'] = 0.1 params['c_lim_s'] = 30 params['C'] = 1. params['N-'] = 1. params['S-'] = 1. params['CH4'] = 1. elif 'default-midpoint': params['oxygen_bubble_rate'] = 0.0 params['nitrogen_source'] = 0.0 params['nitrogen_ratio'] = 0.05 params['carbon_source_from_5e4'] = 5.5e4 params['oxygen_source'] = 6.6e3 params['methane_source'] = 2830.0 params['t_max'] = 0.4 params['sminus_precipitation'] = 0.0 params['fe_precipitation'] = 0.2 params['carbon_precip'] = 0.2 params['diffusion_constant'] = 50 params['ma_op_o_fe_rate_const'] = 10 params['ma_op_o_n_rate_const'] = 5.0 params['ma_op_o_s_rate_const'] = 0.16 params['ma_op_fe_n_rate_const'] = 1.0 params['ma_op_s_n_rate_const'] = 0.0 params['ma_op_ch4_o_rate_const'] = 1e4 params['ma_op_ch4_n_rate_const'] = 0.0 params['ma_op_ch4_s_rate_const'] = 0.01 params['primary_ox_rate_const'] = 1.5 params['c_lim_o'] = 20.0 params['c_lim_n'] = 5.0 params['c_lim_fe'] = 0.1 params['c_lim_s'] = 30 params['C'] = 1. params['N-'] = 1. params['S-'] = 1. params['CH4'] = 1. else: sys.exit("'midpoint' or 'default' are valid types for param dicts") return params
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Javelin makes resources that should survive an upgrade.

Javelin is a tool for creating, verifying, and deleting a small set of
resources in a declarative way.
"""

import argparse
import datetime
import os
import sys
import unittest

import yaml

import tempest.auth
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.openstack.common import timeutils
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import servers_client
from tempest.services.identity.json import identity_client
from tempest.services.image.v2.json import image_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.telemetry.json import telemetry_client
from tempest.services.volume.json import volumes_client

# Parsed command-line options, user registry, and resource definitions;
# populated by get_options() / collect_users() / main().
OPTS = {}
USERS = {}
RES = {}

LOG = None

# Recorded at import time; used to verify telemetry samples predate this run.
JAVELIN_START = datetime.datetime.utcnow()


class OSClient(object):
    """Bundle of per-user OpenStack service clients sharing one auth."""

    _creds = None
    identity = None
    servers = None

    def __init__(self, user, pw, tenant):
        _creds = tempest.auth.KeystoneV2Credentials(
            username=user,
            password=pw,
            tenant_name=tenant)
        _auth = tempest.auth.KeystoneV2AuthProvider(_creds)
        self.identity = identity_client.IdentityClientJSON(_auth)
        self.servers = servers_client.ServersClientJSON(_auth)
        self.objects = object_client.ObjectClient(_auth)
        self.containers = container_client.ContainerClient(_auth)
        self.images = image_client.ImageClientV2JSON(_auth)
        self.flavors = flavors_client.FlavorsClientJSON(_auth)
        self.telemetry = telemetry_client.TelemetryClientJSON(_auth)
        self.volumes = volumes_client.VolumesClientJSON(_auth)


def load_resources(fname):
    """Load the expected resources from a yaml file."""
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects. The resource file is operator-supplied,
    # but yaml.safe_load would be the safer choice here.
    return yaml.load(open(fname, 'r'))


def keystone_admin():
    """Return an OSClient authenticated with the admin credentials."""
    return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)


def client_for_user(name):
    """Return an OSClient for a user previously registered in USERS."""
    LOG.debug("Entering client_for_user")
    if name in USERS:
        user = USERS[name]
        # NOTE(review): this logs the full user dict, which includes the
        # password — consider redacting before logging.
        LOG.debug("Created client for user %s" % user)
        return OSClient(user['name'], user['pass'], user['tenant'])
    else:
        LOG.error("%s not found in USERS: %s" % (name, USERS))


###################
#
# TENANTS
#
###################


def create_tenants(tenants):
    """Create tenants from resource definition.

    Don't create the tenants if they already exist.
    """
    admin = keystone_admin()
    _, body = admin.identity.list_tenants()
    existing = [x['name'] for x in body]
    for tenant in tenants:
        if tenant not in existing:
            admin.identity.create_tenant(tenant)
        else:
            LOG.warn("Tenant '%s' already exists in this environment"
                     % tenant)


def destroy_tenants(tenants):
    """Delete each named tenant via the admin identity client."""
    admin = keystone_admin()
    for tenant in tenants:
        tenant_id = admin.identity.get_tenant_by_name(tenant)['id']
        r, body = admin.identity.delete_tenant(tenant_id)

##############
#
# USERS
#
##############


def _users_for_tenant(users, tenant):
    """Return the user records belonging to the given tenant."""
    u_for_t = []
    for user in users:
        for n in user:
            if user[n]['tenant'] == tenant:
                u_for_t.append(user[n])
    return u_for_t


def _tenants_from_users(users):
    """Return the set of tenant names referenced by the user records."""
    tenants = set()
    for user in users:
        for n in user:
            tenants.add(user[n]['tenant'])
    return tenants


def _assign_swift_role(user):
    """Grant the 'Member' role to a user so they can use swift."""
    admin = keystone_admin()
    resp, roles = admin.identity.list_roles()
    role = next(r for r in roles if r['name'] == 'Member')
    LOG.debug(USERS[user])
    try:
        admin.identity.assign_user_role(
            USERS[user]['tenant_id'],
            USERS[user]['id'],
            role['id'])
    except exceptions.Conflict:
        # don't care if it's already assigned
        pass


def create_users(users):
    """Create tenants from resource definition.

    Don't create the tenants if they already exist.
    """
    global USERS
    LOG.info("Creating users")
    admin = keystone_admin()
    for u in users:
        try:
            tenant = admin.identity.get_tenant_by_name(u['tenant'])
        except exceptions.NotFound:
            LOG.error("Tenant: %s - not found" % u['tenant'])
            continue
        try:
            admin.identity.get_user_by_username(tenant['id'], u['name'])
            LOG.warn("User '%s' already exists in this environment"
                     % u['name'])
        except exceptions.NotFound:
            admin.identity.create_user(
                u['name'], u['pass'], tenant['id'],
                "%s@%s" % (u['name'], tenant['id']),
                enabled=True)


def destroy_users(users):
    """Delete each user in its tenant via the admin identity client."""
    admin = keystone_admin()
    for user in users:
        tenant_id = admin.identity.get_tenant_by_name(user['tenant'])['id']
        user_id = admin.identity.get_user_by_username(tenant_id,
                                                      user['name'])['id']
        r, body = admin.identity.delete_user(user_id)


def collect_users(users):
    """Resolve tenant/user ids for each user record and cache in USERS."""
    global USERS
    LOG.info("Collecting users")
    admin = keystone_admin()
    for u in users:
        tenant = admin.identity.get_tenant_by_name(u['tenant'])
        u['tenant_id'] = tenant['id']
        USERS[u['name']] = u
        body = admin.identity.get_user_by_username(tenant['id'], u['name'])
        USERS[u['name']]['id'] = body['id']


class JavelinCheck(unittest.TestCase):
    def __init__(self, users, resources):
        super(JavelinCheck, self).__init__()
        self.users = users
        self.res = resources

    def runTest(self, *args):
        pass

    def check(self):
        self.check_users()
        self.check_objects()
        self.check_servers()
        self.check_volumes()
        self.check_telemetry()

    def check_users(self):
        """Check that the users we expect to exist, do.

        We don't use the resource list for this because we need to validate
        that things like tenantId didn't drift across versions.
        """
        LOG.info("checking users")
        # FIX: .items() instead of the Python-2-only .iteritems()
        for name, user in self.users.items():
            client = keystone_admin()
            _, found = client.identity.get_user(user['id'])
            self.assertEqual(found['name'], user['name'])
            self.assertEqual(found['tenantId'], user['tenant_id'])

            # also ensure we can auth with that user, and do something
            # on the cloud. We don't care about the results except that it
            # remains authorized.
            client = client_for_user(user['name'])
            resp, body = client.servers.list_servers()
            self.assertEqual(resp['status'], '200')

    def check_objects(self):
        """Check that the objects created are still there."""
        if not self.res.get('objects'):
            return
        LOG.info("checking objects")
        for obj in self.res['objects']:
            client = client_for_user(obj['owner'])
            r, contents = client.objects.get_object(
                obj['container'], obj['name'])
            source = _file_contents(obj['file'])
            self.assertEqual(contents, source)

    def check_servers(self):
        """Check that the servers are still up and running."""
        if not self.res.get('servers'):
            return
        LOG.info("checking servers")
        for server in self.res['servers']:
            client = client_for_user(server['owner'])
            found = _get_server_by_name(client, server['name'])
            self.assertIsNotNone(
                found,
                "Couldn't find expected server %s" % server['name'])

            r, found = client.servers.get_server(found['id'])
            # get the ipv4 address
            addr = found['addresses']['private'][0]['addr']
            for count in range(60):
                return_code = os.system("ping -c1 " + addr)
                # FIX: equality (==), not identity (is), for int comparison;
                # `is 0` only worked via CPython's small-int caching.
                if return_code == 0:
                    break
            self.assertNotEqual(count, 59,
                                "Server %s is not pingable at %s" % (
                                    server['name'], addr))

    def check_telemetry(self):
        """Check that ceilometer provides a sane sample.

        Confirm that there are more than one sample and that they have the
        expected metadata.

        If in check mode confirm that the oldest sample available is from
        before the upgrade.
        """
        LOG.info("checking telemetry")
        for server in self.res['servers']:
            client = client_for_user(server['owner'])
            response, body = client.telemetry.list_samples(
                'instance',
                query=('metadata.display_name', 'eq', server['name'])
            )
            self.assertEqual(response.status, 200)
            self.assertTrue(len(body) >= 1, 'expecting at least one sample')
            self._confirm_telemetry_sample(server, body[-1])

    def check_volumes(self):
        """Check that the volumes are still there and attached."""
        if not self.res.get('volumes'):
            return
        LOG.info("checking volumes")
        for volume in self.res['volumes']:
            client = client_for_user(volume['owner'])
            vol_body = _get_volume_by_name(client, volume['name'])
            self.assertIsNotNone(
                vol_body,
                "Couldn't find expected volume %s" % volume['name'])

            # Verify that a volume's attachment retrieved
            server_id = _get_server_by_name(client, volume['server'])['id']
            attachment = client.volumes.get_attachment_from_volume(vol_body)
            self.assertEqual(vol_body['id'], attachment['volume_id'])
            self.assertEqual(server_id, attachment['server_id'])

    def _confirm_telemetry_sample(self, server, sample):
        """Check this sample matches the expected resource metadata."""
        # Confirm display_name
        self.assertEqual(server['name'],
                         sample['resource_metadata']['display_name'])
        # Confirm instance_type of flavor
        flavor = sample['resource_metadata'].get(
            'flavor.name',
            sample['resource_metadata'].get('instance_type')
        )
        self.assertEqual(server['flavor'], flavor)
        # Confirm the oldest sample was created before upgrade.
        if OPTS.mode == 'check':
            oldest_timestamp = timeutils.normalize_time(
                timeutils.parse_isotime(sample['timestamp']))
            self.assertTrue(
                oldest_timestamp < JAVELIN_START,
                'timestamp should come before start of second javelin run'
            )


#######################
#
# OBJECTS
#
#######################


def _file_contents(fname):
    """Return the full contents of a local file."""
    with open(fname, 'r') as f:
        return f.read()


def create_objects(objects):
    """Create swift containers/objects from the resource definitions."""
    if not objects:
        return
    LOG.info("Creating objects")
    for obj in objects:
        LOG.debug("Object %s" % obj)
        _assign_swift_role(obj['owner'])
        client = client_for_user(obj['owner'])
        client.containers.create_container(obj['container'])
        client.objects.create_object(
            obj['container'], obj['name'],
            _file_contents(obj['file']))


def destroy_objects(objects):
    """Delete swift objects; raise if the deletion was not a 2xx."""
    for obj in objects:
        client = client_for_user(obj['owner'])
        r, body = client.objects.delete_object(obj['container'], obj['name'])
        if not (200 <= int(r['status']) < 299):
            raise ValueError("unable to destroy object: [%s] %s" % (r, body))


#######################
#
# IMAGES
#
#######################


def _resolve_image(image, imgtype):
    """Return (name, local path) for an image part of the given type."""
    name = image[imgtype]
    fname = os.path.join(OPTS.devstack_base, image['imgdir'], name)
    return name, fname


def _get_image_by_name(client, name):
    """Return the image record with the given name, or None."""
    r, body = client.images.image_list()
    for image in body:
        if name == image['name']:
            return image
    return None


def create_images(images):
    """Upload glance images, handling the 3-part ami/aki/ari format."""
    if not images:
        return
    LOG.info("Creating images")
    for image in images:
        client = client_for_user(image['owner'])

        # only upload a new image if the name isn't there
        if _get_image_by_name(client, image['name']):
            LOG.info("Image '%s' already exists" % image['name'])
            continue

        # special handling for 3 part image
        extras = {}
        if image['format'] == 'ami':
            name, fname = _resolve_image(image, 'aki')
            r, aki = client.images.create_image(
                'javelin_' + name, 'aki', 'aki')
            client.images.store_image(aki.get('id'), open(fname, 'r'))
            extras['kernel_id'] = aki.get('id')

            name, fname = _resolve_image(image, 'ari')
            r, ari = client.images.create_image(
                'javelin_' + name, 'ari', 'ari')
            client.images.store_image(ari.get('id'), open(fname, 'r'))
            extras['ramdisk_id'] = ari.get('id')

        _, fname = _resolve_image(image, 'file')
        r, body = client.images.create_image(
            image['name'], image['format'], image['format'], **extras)
        image_id = body.get('id')
        client.images.store_image(image_id, open(fname, 'r'))


def destroy_images(images):
    """Delete the named glance images if they exist."""
    if not images:
        return
    LOG.info("Destroying images")
    for image in images:
        client = client_for_user(image['owner'])

        response = _get_image_by_name(client, image['name'])
        if not response:
            LOG.info("Image '%s' does not exists" % image['name'])
            continue
        client.images.delete_image(response['id'])


#######################
#
# SERVERS
#
#######################


def _get_server_by_name(client, name):
    """Return the server record with the given name, or None."""
    r, body = client.servers.list_servers()
    for server in body['servers']:
        if name == server['name']:
            return server
    return None


def _get_flavor_by_name(client, name):
    """Return the flavor record with the given name, or None."""
    r, body = client.flavors.list_flavors()
    for flavor in body:
        if name == flavor['name']:
            return flavor
    return None


def create_servers(servers):
    """Boot the defined servers and wait for them to go ACTIVE."""
    if not servers:
        return
    LOG.info("Creating servers")
    for server in servers:
        client = client_for_user(server['owner'])

        if _get_server_by_name(client, server['name']):
            LOG.info("Server '%s' already exists" % server['name'])
            continue

        image_id = _get_image_by_name(client, server['image'])['id']
        flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
        resp, body = client.servers.create_server(server['name'], image_id,
                                                  flavor_id)
        server_id = body['id']
        client.servers.wait_for_server_status(server_id, 'ACTIVE')


def destroy_servers(servers):
    """Delete the named servers and wait for them to terminate."""
    if not servers:
        return
    LOG.info("Destroying servers")
    for server in servers:
        client = client_for_user(server['owner'])

        response = _get_server_by_name(client, server['name'])
        if not response:
            LOG.info("Server '%s' does not exist" % server['name'])
            continue

        client.servers.delete_server(response['id'])
        client.servers.wait_for_server_termination(response['id'],
                                                   ignore_error=True)
#######################
#
# VOLUMES
#
#######################


def _get_volume_by_name(client, name):
    """Return the volume record whose display_name matches, or None."""
    r, body = client.volumes.list_volumes()
    for volume in body:
        if name == volume['display_name']:
            return volume
    return None


def create_volumes(volumes):
    """Create the defined cinder volumes and wait until available."""
    for volume in volumes:
        client = client_for_user(volume['owner'])

        # only create a volume if the name isn't here
        if _get_volume_by_name(client, volume['name']):
            LOG.info("volume '%s' already exists" % volume['name'])
            continue

        size = volume['gb']
        v_name = volume['name']
        resp, body = client.volumes.create_volume(size=size,
                                                  display_name=v_name)
        client.volumes.wait_for_volume_status(body['id'], 'available')


def destroy_volumes(volumes):
    """Detach and delete the defined cinder volumes."""
    for volume in volumes:
        client = client_for_user(volume['owner'])
        volume_id = _get_volume_by_name(client, volume['name'])['id']
        # detach first; a volume cannot be deleted while attached
        client.volumes.detach_volume(volume_id)
        client.volumes.delete_volume(volume_id)


def attach_volumes(volumes):
    """Attach each volume to its server at the configured device path."""
    for volume in volumes:
        client = client_for_user(volume['owner'])

        server_id = _get_server_by_name(client, volume['server'])['id']
        volume_id = _get_volume_by_name(client, volume['name'])['id']
        device = volume['device']
        client.volumes.attach_volume(volume_id, server_id, device)


#######################
#
# MAIN LOGIC
#
#######################


def create_resources():
    """Create every resource described in RES, in dependency order."""
    LOG.info("Creating Resources")
    # first create keystone level resources, and we need to be admin
    # for those.
    create_tenants(RES['tenants'])
    create_users(RES['users'])
    collect_users(RES['users'])

    # next create resources in a well known order
    create_objects(RES['objects'])
    create_images(RES['images'])
    create_servers(RES['servers'])
    create_volumes(RES['volumes'])
    attach_volumes(RES['volumes'])


def destroy_resources():
    """Destroy the resources described in RES."""
    LOG.info("Destroying Resources")
    # Destroy in inverse order of create
    destroy_servers(RES['servers'])
    destroy_images(RES['images'])
    destroy_objects(RES['objects'])
    destroy_volumes(RES['volumes'])
    destroy_users(RES['users'])
    destroy_tenants(RES['tenants'])
    LOG.warn("Destroy mode incomplete")


def get_options():
    """Parse command-line options into the module-level OPTS."""
    global OPTS
    parser = argparse.ArgumentParser(
        description='Create and validate a fixed set of OpenStack resources')
    parser.add_argument('-m', '--mode',
                        metavar='<create|check|destroy>',
                        required=True,
                        help=('One of (create, check, destroy)'))
    parser.add_argument('-r', '--resources',
                        required=True,
                        metavar='resourcefile.yaml',
                        help='Resources definition yaml file')

    parser.add_argument(
        '-d', '--devstack-base',
        required=True,
        metavar='/opt/stack/old',
        help='Devstack base directory for retrieving artifacts')
    parser.add_argument(
        '-c', '--config-file',
        metavar='/etc/tempest.conf',
        help='path to javelin2(tempest) config file')

    # auth bits, letting us also just source the devstack openrc
    parser.add_argument('--os-username',
                        metavar='<auth-user-name>',
                        default=os.environ.get('OS_USERNAME'),
                        help=('Defaults to env[OS_USERNAME].'))
    parser.add_argument('--os-password',
                        metavar='<auth-password>',
                        default=os.environ.get('OS_PASSWORD'),
                        help=('Defaults to env[OS_PASSWORD].'))
    parser.add_argument('--os-tenant-name',
                        metavar='<auth-tenant-name>',
                        default=os.environ.get('OS_TENANT_NAME'),
                        help=('Defaults to env[OS_TENANT_NAME].'))

    OPTS = parser.parse_args()
    if OPTS.mode not in ('create', 'check', 'destroy'):
        print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
        parser.print_help()
        sys.exit(1)
    if OPTS.config_file:
        config.CONF.set_config_path(OPTS.config_file)


def setup_logging():
    """Initialize the module-level LOG via oslo logging."""
    global LOG
    logging.setup(__name__)
    LOG = logging.getLogger(__name__)


def main():
    """Entry point: dispatch on --mode and return a process exit code."""
    global RES

    get_options()
    setup_logging()
    RES = load_resources(OPTS.resources)

    if OPTS.mode == 'create':
        create_resources()
        # Make sure the resources we just created actually work
        checker = JavelinCheck(USERS, RES)
        checker.check()
    elif OPTS.mode == 'check':
        collect_users(RES['users'])
        checker = JavelinCheck(USERS, RES)
        checker.check()
    elif OPTS.mode == 'destroy':
        collect_users(RES['users'])
        destroy_resources()
    else:
        LOG.error('Unknown mode %s' % OPTS.mode)
        return 1
    LOG.info('javelin2 successfully finished')
    return 0

if __name__ == "__main__":
    sys.exit(main())
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import module as mojom

# This module provides a mechanism for determining the packed order and offsets
# of a mojom.Struct.
#
# ps = pack.PackedStruct(struct)
# ps.packed_fields will access a list of PackedField objects, each of which
# will have an offset, a size and a bit (for mojom.BOOLs).

# Size of struct header in bytes: num_bytes [4B] + version [4B].
HEADER_SIZE = 8


class PackedField(object):
  # Wire size in bytes for each primitive kind.
  kind_to_size = {
    mojom.BOOL: 1,
    mojom.INT8: 1,
    mojom.UINT8: 1,
    mojom.INT16: 2,
    mojom.UINT16: 2,
    mojom.INT32: 4,
    mojom.UINT32: 4,
    mojom.FLOAT: 4,
    mojom.HANDLE: 4,
    mojom.MSGPIPE: 4,
    mojom.SHAREDBUFFER: 4,
    mojom.DCPIPE: 4,
    mojom.DPPIPE: 4,
    mojom.NULLABLE_HANDLE: 4,
    mojom.NULLABLE_MSGPIPE: 4,
    mojom.NULLABLE_SHAREDBUFFER: 4,
    mojom.NULLABLE_DCPIPE: 4,
    mojom.NULLABLE_DPPIPE: 4,
    mojom.INT64: 8,
    mojom.UINT64: 8,
    mojom.DOUBLE: 8,
    mojom.STRING: 8,
    mojom.NULLABLE_STRING: 8
  }

  @classmethod
  def GetSizeForKind(cls, kind):
    """Return the wire size in bytes of a field of the given kind."""
    # Pointer-typed kinds occupy an 8-byte offset on the wire.
    if isinstance(kind, (mojom.Array, mojom.Map, mojom.Struct,
                         mojom.Interface)):
      return 8
    if isinstance(kind, mojom.Union):
      return 16
    # An interface request is carried as a message pipe handle.
    if isinstance(kind, mojom.InterfaceRequest):
      kind = mojom.MSGPIPE
    if isinstance(kind, mojom.Enum):
      # TODO(mpcomplete): what about big enums?
      return cls.kind_to_size[mojom.INT32]
    if not kind in cls.kind_to_size:
      raise Exception("Invalid kind: %s" % kind.spec)
    return cls.kind_to_size[kind]

  @classmethod
  def GetAlignmentForKind(cls, kind):
    """Return the alignment in bytes of a field of the given kind."""
    if isinstance(kind, mojom.Interface):
      return 4
    if isinstance(kind, mojom.Union):
      return 8
    # Everything else is aligned to its own size.
    return cls.GetSizeForKind(kind)

  def __init__(self, field, index, ordinal):
    """
    Args:
      field: the original field.
      index: the position of the original field in the struct.
      ordinal: the ordinal of the field for serialization.
    """
    self.field = field
    self.index = index
    self.ordinal = ordinal
    self.size = self.GetSizeForKind(field.kind)
    self.alignment = self.GetAlignmentForKind(field.kind)
    # offset/bit/min_version are filled in later by PackedStruct.
    self.offset = None
    self.bit = None
    self.min_version = None


def GetPad(offset, alignment):
  """Returns the pad necessary to reserve space so that |offset + pad| equals to
  some multiple of |alignment|."""
  return (alignment - (offset % alignment)) % alignment


def GetFieldOffset(field, last_field):
  """Returns a 2-tuple of the field offset and bit (for BOOLs)."""
  # Consecutive BOOLs share a byte, one bit each, until the byte is full.
  if (field.field.kind == mojom.BOOL and
      last_field.field.kind == mojom.BOOL and
      last_field.bit < 7):
    return (last_field.offset, last_field.bit + 1)

  offset = last_field.offset + last_field.size
  pad = GetPad(offset, field.alignment)
  return (offset + pad, 0)


def GetPayloadSizeUpToField(field):
  """Returns the payload size (not including struct header) if |field| is the
  last field.
  """
  if not field:
    return 0
  offset = field.offset + field.size
  # The payload is always padded out to an 8-byte boundary.
  pad = GetPad(offset, 8)
  return offset + pad


class PackedStruct(object):
  def __init__(self, struct):
    self.struct = struct
    # |packed_fields| contains all the fields, in increasing offset order.
    self.packed_fields = []
    # |packed_fields_in_ordinal_order| refers to the same fields as
    # |packed_fields|, but in ordinal order.
    self.packed_fields_in_ordinal_order = []

    # No fields.
    if (len(struct.fields) == 0):
      return

    # Start by sorting by ordinal.
    src_fields = self.packed_fields_in_ordinal_order
    ordinal = 0
    for index, field in enumerate(struct.fields):
      # An explicit ordinal resets the running counter.
      if field.ordinal is not None:
        ordinal = field.ordinal
      src_fields.append(PackedField(field, index, ordinal))
      ordinal += 1
    src_fields.sort(key=lambda field: field.ordinal)

    # Set |min_version| for each field.
    # min_version is monotonically non-decreasing through the ordinals;
    # fields without an explicit MinVersion inherit the running value.
    next_min_version = 0
    for packed_field in src_fields:
      if packed_field.field.min_version is None:
        assert next_min_version == 0
      else:
        assert packed_field.field.min_version >= next_min_version
        next_min_version = packed_field.field.min_version
      packed_field.min_version = next_min_version

      if (packed_field.min_version != 0 and
          mojom.IsReferenceKind(packed_field.field.kind) and
          not packed_field.field.kind.is_nullable):
        raise Exception("Non-nullable fields are only allowed in version 0 of "
                        "a struct. %s.%s is defined with [MinVersion=%d]."
                        % (self.struct.name, packed_field.field.name,
                           packed_field.min_version))

    src_field = src_fields[0]
    src_field.offset = 0
    src_field.bit = 0
    dst_fields = self.packed_fields
    dst_fields.append(src_field)

    # Then find first slot that each field will fit.
    for src_field in src_fields[1:]:
      last_field = dst_fields[0]
      for i in xrange(1, len(dst_fields)):
        next_field = dst_fields[i]
        offset, bit = GetFieldOffset(src_field, last_field)
        if offset + src_field.size <= next_field.offset:
          # Found hole.
          src_field.offset = offset
          src_field.bit = bit
          dst_fields.insert(i, src_field)
          break
        last_field = next_field
      if src_field.offset is None:
        # Add to end
        src_field.offset, src_field.bit = GetFieldOffset(src_field,
                                                         last_field)
        dst_fields.append(src_field)


class ByteInfo(object):
  def __init__(self):
    # True when no field occupies this byte.
    self.is_padding = False
    # Fields whose storage begins at this byte (several only for BOOL bits).
    self.packed_fields = []


def GetByteLayout(packed_struct):
  """Return one ByteInfo per payload byte describing the packed layout."""
  total_payload_size = GetPayloadSizeUpToField(
      packed_struct.packed_fields[-1] if packed_struct.packed_fields else None)
  bytes = [ByteInfo() for i in xrange(total_payload_size)]

  limit_of_previous_field = 0
  for packed_field in packed_struct.packed_fields:
    # Bytes between the previous field's end and this field's start are pad.
    for i in xrange(limit_of_previous_field, packed_field.offset):
      bytes[i].is_padding = True
    bytes[packed_field.offset].packed_fields.append(packed_field)
    limit_of_previous_field = packed_field.offset + packed_field.size

  # Trailing alignment bytes are padding too.
  for i in xrange(limit_of_previous_field, len(bytes)):
    bytes[i].is_padding = True

  for byte in bytes:
    # A given byte cannot both be padding and have a fields packed into it.
    assert not (byte.is_padding and byte.packed_fields)

  return bytes


class VersionInfo(object):
  def __init__(self, version, num_fields, num_bytes):
    self.version = version
    self.num_fields = num_fields
    self.num_bytes = num_bytes


def GetVersionInfo(packed_struct):
  """Get version information for a struct.

  Args:
    packed_struct: A PackedStruct instance.

  Returns:
    A non-empty list of VersionInfo instances, sorted by version in increasing
    order.
    Note: The version numbers may not be consecutive.
  """
  versions = []
  last_version = 0
  last_num_fields = 0
  last_payload_size = 0

  for packed_field in packed_struct.packed_fields_in_ordinal_order:
    # Each time min_version bumps, snapshot the previous version's totals.
    if packed_field.min_version != last_version:
      versions.append(
          VersionInfo(last_version, last_num_fields,
                      last_payload_size + HEADER_SIZE))
      last_version = packed_field.min_version

    last_num_fields += 1
    # The fields are iterated in ordinal order here. However, the size of a
    # version is determined by the last field of that version in pack order,
    # instead of ordinal order. Therefore, we need to calculate the max value.
    last_payload_size = max(GetPayloadSizeUpToField(packed_field),
                            last_payload_size)

  assert len(versions) == 0 or last_num_fields != versions[-1].num_fields
  versions.append(VersionInfo(last_version, last_num_fields,
                              last_payload_size + HEADER_SIZE))
  return versions
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from eventlet import greenthread
from oslo.config import cfg
from oslo import messaging
import sqlalchemy as sa
from sqlalchemy.orm import exc

from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils

LOG = logging.getLogger(__name__)

cfg.CONF.register_opt(
    cfg.IntOpt('agent_down_time', default=9,
               help=_("Seconds to regard the agent is down; should be at "
                      "least twice report_interval, to be sure the "
                      "agent is down for good.")))


class Agent(model_base.BASEV2, models_v2.HasId):
    """Represents agents running in neutron deployments."""

    # An agent is uniquely identified by what it is and where it runs.
    __table_args__ = (
        sa.UniqueConstraint('agent_type', 'host',
                            name='uniq_agents0agent_type0host'),
    )

    # L3 agent, DHCP agent, OVS agent, LinuxBridge
    agent_type = sa.Column(sa.String(255), nullable=False)
    binary = sa.Column(sa.String(255), nullable=False)
    # TOPIC is a fanout exchange topic
    topic = sa.Column(sa.String(255), nullable=False)
    # TOPIC.host is a target topic
    host = sa.Column(sa.String(255), nullable=False)
    admin_state_up = sa.Column(sa.Boolean, default=True,
                               nullable=False)
    # the time when first report came from agents
    created_at = sa.Column(sa.DateTime, nullable=False)
    # the time when first report came after agents start
    started_at = sa.Column(sa.DateTime, nullable=False)
    # updated when agents report
    heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
    # description is note for admin user
    description = sa.Column(sa.String(255))
    # configurations: a json dict string, I think 4095 is enough
    configurations = sa.Column(sa.String(4095), nullable=False)

    @property
    def is_active(self):
        # An agent is active while its last heartbeat is recent enough.
        return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)


class AgentDbMixin(ext_agent.AgentPluginBase):
    """Mixin class to add agent extension to db_plugin_base_v2."""

    def _get_agent(self, context, id):
        """Fetch an Agent row by id or raise AgentNotFound."""
        try:
            agent = self._get_by_id(context, Agent, id)
        except exc.NoResultFound:
            raise ext_agent.AgentNotFound(id=id)
        return agent

    @classmethod
    def is_agent_down(cls, heart_beat_time):
        """Return True when the heartbeat is older than agent_down_time."""
        return timeutils.is_older_than(heart_beat_time,
                                       cfg.CONF.agent_down_time)

    def get_configuration_dict(self, agent_db):
        """Decode the agent's JSON configurations, tolerating bad data."""
        try:
            conf = jsonutils.loads(agent_db.configurations)
        except Exception:
            # Best-effort: log and fall back to an empty config rather than
            # failing the whole agent listing on one corrupt row.
            msg = _('Configuration for agent %(agent_type)s on host %(host)s'
                    ' is invalid.')
            LOG.warn(msg, {'agent_type': agent_db.agent_type,
                           'host': agent_db.host})
            conf = {}
        return conf

    def _make_agent_dict(self, agent, fields=None):
        """Convert an Agent row into the API response dict."""
        attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
            ext_agent.RESOURCE_NAME + 's')
        # 'alive' and 'configurations' are derived below, not copied as-is.
        res = dict((k, agent[k]) for k in attr
                   if k not in ['alive', 'configurations'])
        res['alive'] = not AgentDbMixin.is_agent_down(
            res['heartbeat_timestamp'])
        res['configurations'] = self.get_configuration_dict(agent)
        return self._fields(res, fields)

    def delete_agent(self, context, id):
        """Delete the agent row with the given id."""
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            context.session.delete(agent)

    def update_agent(self, context, id, agent):
        """Apply the supplied attribute updates to an existing agent."""
        agent_data = agent['agent']
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            agent.update(agent_data)
        return self._make_agent_dict(agent)

    def get_agents_db(self, context, filters=None):
        """Return raw Agent rows matching the filters."""
        query = self._get_collection_query(context, Agent, filters=filters)
        return query.all()

    def get_agents(self, context, filters=None, fields=None):
        """Return API dicts for agents matching the filters."""
        return self._get_collection(context, Agent,
                                    self._make_agent_dict,
                                    filters=filters, fields=fields)

    def _get_agent_by_type_and_host(self, context, agent_type, host):
        """Look up the unique agent identified by (agent_type, host)."""
        query = self._model_query(context, Agent)
        try:
            agent_db = query.filter(Agent.agent_type == agent_type,
                                    Agent.host == host).one()
            return agent_db
        except exc.NoResultFound:
            raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
                                                    host=host)
        except exc.MultipleResultsFound:
            raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
                                                         host=host)

    def get_agent(self, context, id, fields=None):
        """Return the API dict for a single agent."""
        agent = self._get_agent(context, id)
        return self._make_agent_dict(agent, fields)

    def _create_or_update_agent(self, context, agent):
        """Insert a new agent row or refresh an existing one from a report."""
        with context.session.begin(subtransactions=True):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent[k]) for k in res_keys)

            configurations_dict = agent.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)

            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(
                    context, agent['agent_type'], agent['host'])
                res['heartbeat_timestamp'] = current_time
                if agent.get('start_flag'):
                    res['started_at'] = current_time
                # sleep(0) cooperatively yields to other greenthreads so a
                # burst of agent reports does not monopolize the hub.
                greenthread.sleep(0)
                agent_db.update(res)
            except ext_agent.AgentNotFoundByTypeHost:
                # First report from this (agent_type, host): create the row.
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = True
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
            greenthread.sleep(0)

    def create_or_update_agent(self, context, agent):
        """Create or update agent according to report."""
        try:
            return self._create_or_update_agent(context, agent)
        except db_exc.DBDuplicateEntry as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if e.columns == ['agent_type', 'host']:
                    # It might happen that two or more concurrent transactions
                    # are trying to insert new rows having the same value of
                    # (agent_type, host) pair at the same time (if there has
                    # been no such entry in the table and multiple agent status
                    # updates are being processed at the moment). In this case
                    # having a unique constraint on (agent_type, host) columns
                    # guarantees that only one transaction will succeed and
                    # insert a new agent entry, others will fail and be rolled
                    # back. That means we must retry them one more time: no
                    # INSERTs will be issued, because
                    # _get_agent_by_type_and_host() will return the existing
                    # agent entry, which will be updated multiple times
                    ctxt.reraise = False
                    return self._create_or_update_agent(context, agent)


class AgentExtRpcCallback(object):
    """Processes the rpc report in plugin implementations."""

    target = messaging.Target(version='1.0')
    # Reports timestamped before process start are stale and ignored.
    START_TIME = timeutils.utcnow()

    def __init__(self, plugin=None):
        self.plugin = plugin

    def report_state(self, context, **kwargs):
        """Report state from agent to server."""
        time = kwargs['time']
        time = timeutils.parse_strtime(time)
        if self.START_TIME > time:
            LOG.debug(_("Message with invalid timestamp received"))
            return
        agent_state = kwargs['agent_state']['agent_state']
        if not self.plugin:
            # Lazily resolve the core plugin on first report.
            self.plugin = manager.NeutronManager.get_plugin()
        self.plugin.create_or_update_agent(context, agent_state)
import pytest


def _setup_batchup_path(monkeypatch, path):
    # Patch batchup.config.get_batchup_path so all derived data paths are
    # rooted at ``path`` for the duration of the test.
    from batchup import config

    def get_batchup_path_patch():
        return path

    monkeypatch.setattr(config, 'get_batchup_path', get_batchup_path_patch)


def _setup_batchup_temp(monkeypatch):
    # Root the batchup path at a fresh temporary directory; caller is
    # responsible for removing it via _teardown_batchup_temp.
    import tempfile
    tdir = tempfile.mkdtemp()
    _setup_batchup_path(monkeypatch, tdir)
    return tdir


def _teardown_batchup_temp(tdir):
    import shutil
    shutil.rmtree(tdir)


def _setup_batchup_temp_and_urlretrieve(monkeypatch, downloads=None):
    # As _setup_batchup_temp, but also replace config.urlretrieve with a
    # fake that writes the URL itself as the file content, records
    # (url, path) into ``downloads`` (if given) and fires the reporthook
    # once as if the whole "download" completed.
    import tempfile
    from batchup import config
    tdir = _setup_batchup_temp(monkeypatch)

    def urlretrieve_patch(url, path, reporthook):
        with open(path, 'w') as f:
            f.write(url)
        if downloads is not None:
            downloads.append((url, path))
        reporthook(1, len(url), len(url))

    monkeypatch.setattr(config, 'urlretrieve', urlretrieve_patch)
    return tdir


def test_get_data_dir(monkeypatch):
    # get_data_dir is expected to be <batchup path>/data.
    import os
    from batchup import config
    _setup_batchup_path(monkeypatch, 'test')
    assert config.get_data_dir() == os.path.join('test', 'data')


def test_get_data_path(monkeypatch):
    # Relative names resolve under the data dir; absolute paths pass through.
    import os
    from batchup import config
    _setup_batchup_path(monkeypatch, 'test')
    assert config.get_data_path('foo.xyz') == os.path.join(
        'test', 'data', 'foo.xyz')
    assert config.get_data_path(os.path.abspath('foo.xyz')) == \
        os.path.abspath('foo.xyz')


def test_download(monkeypatch, capsys):
    # download() should create intermediate directories, invoke urlretrieve
    # exactly once, report progress on stdout, and skip the fetch when the
    # file already exists.
    import os
    from batchup import config
    _downloads = []
    tdir = _setup_batchup_temp_and_urlretrieve(monkeypatch, _downloads)

    download_path = os.path.join(tdir, 'a', 'b', 'c', 'd.h5')
    res = config.download(download_path, 'myurl/d.h5')
    out, err = capsys.readouterr()
    line1 = 'Downloading {} to {}'.format('myurl/d.h5', download_path)
    line2 = '\rDownloading {} {:.2%}'.format('d.h5', 1.0)
    assert out == '{}\n{}\r'.format(line1, line2)
    assert res == download_path
    assert len(_downloads) == 1
    assert _downloads[0][0] == 'myurl/d.h5'
    assert _downloads[0][1] == download_path
    assert os.path.exists(os.path.join(tdir, 'a', 'b', 'c'))
    assert os.path.exists(download_path)
    assert open(download_path, 'r').read() == 'myurl/d.h5'

    # Attempting to download a second time should exit
    res = config.download(download_path, 'myurl/d.h5')
    assert res == download_path
    assert len(_downloads) == 1
    out, err = capsys.readouterr()
    assert out == ''

    _teardown_batchup_temp(tdir)


def test_download_err(monkeypatch):
    # When urlretrieve raises, download() must propagate the exception and
    # clean up the partially written file.
    import os
    import tempfile
    import shutil
    from batchup import config

    _downloads = []

    def urlretrieve_patch(url, path, reporthook):
        _downloads.append((url, path))
        with open(path, 'w') as f:
            f.write('hello world')
        raise ValueError

    tdir = tempfile.mkdtemp()
    _setup_batchup_path(monkeypatch, tdir)
    monkeypatch.setattr(config, 'urlretrieve', urlretrieve_patch)

    download_path = os.path.join(tdir, 'a', 'b', 'c', 'd.h5')
    with pytest.raises(ValueError):
        config.download(download_path, 'myurl/d.h5')
    assert _downloads[0][0] == 'myurl/d.h5'
    assert _downloads[0][1] == download_path
    assert os.path.exists(os.path.join(tdir, 'a', 'b', 'c'))
    # `download` should have removed the file
    assert not os.path.exists(download_path)

    shutil.rmtree(tdir)


def test_compute_sha256():
    # compute_sha256 should match hashlib's hex digest of the file content.
    import os
    import tempfile
    import shutil
    import hashlib
    from batchup import config

    tdir = tempfile.mkdtemp()
    fpath = os.path.join(tdir, 'hi.txt')
    with open(fpath, 'w') as f:
        f.write('hello world')
    hasher = hashlib.sha256()
    hasher.update(b'hello world')
    expected = hasher.hexdigest()
    assert config.compute_sha256(fpath) == expected
    shutil.rmtree(tdir)


def test_verify_file(capsys):
    # verify_file: True for a matching hash, False for missing files or a
    # wrong hash; a None hash passes but prints the computed SHA-256.
    import os
    import tempfile
    import shutil
    import hashlib
    from batchup import config

    tdir = tempfile.mkdtemp()
    fpath = os.path.join(tdir, 'hi.txt')
    invalid_path = os.path.join(tdir, 'bye.txt')
    with open(fpath, 'w') as f:
        f.write('hello world')
    hasher = hashlib.sha256()
    hasher.update(b'hello world')
    expected = hasher.hexdigest()
    bad_hasher = hashlib.sha256()
    bad_hasher.update(b'goodbye world')
    bad_expected = bad_hasher.hexdigest()

    # File should pass
    assert config.verify_file(fpath, expected)
    # Should not pass non-existent file
    assert not config.verify_file(invalid_path, expected)
    # Should not pass invalid hash
    assert not config.verify_file(fpath, bad_expected)
    # File should pass with *no* specified hash, but should generate output
    assert config.verify_file(fpath, None)
    out, err = capsys.readouterr()
    line1 = 'SHA-256 of {}:'.format(fpath)
    line2 = '  "{}"'.format(expected)
    assert out == '{}\n{}\n'.format(line1, line2)
    shutil.rmtree(tdir)


def test_download_data(monkeypatch, capsys):
    # download_data: downloads to '<path>.unverified', verifies the hash and
    # renames on success; retries on errors/verification failure and gives
    # up after config._MAX_DOWNLOAD_TRIES attempts.
    import os
    import tempfile
    import shutil
    import hashlib
    from batchup import config

    # Good and bad expected hashes
    hasher = hashlib.sha256()
    hasher.update(b'hello world')
    expected = hasher.hexdigest()

    _downloads = []
    # Scripted outcomes for successive fake downloads: (data, error-or-None).
    _download_data = []

    def download_patch(path, url):
        dir_name = os.path.dirname(path)
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
        _downloads.append((url, path))
        data, err = _download_data[0]
        del _download_data[0]
        with open(path, 'w') as f:
            f.write(data)
        if err is not None:
            raise err()
        return path

    tdir = tempfile.mkdtemp()
    _setup_batchup_path(monkeypatch, tdir)
    monkeypatch.setattr(config, 'download', download_patch)

    # Successful download on first time
    _download_data[:] = [('hello world', None)]
    download_name = 'd.h5'
    download_path = config.get_data_path(download_name)
    res = config.download_data(download_name, 'myurl/d.h5', expected)
    out, err = capsys.readouterr()
    assert out == ''
    assert res == download_path
    assert len(_downloads) == 1
    assert _downloads[0] == ('myurl/d.h5', download_path + '.unverified')
    assert os.path.exists(download_path)
    assert open(download_path, 'r').read() == 'hello world'
    del _downloads[:]

    # Attempting to download a second time should exit
    _download_data[:] = [('hello world', None)]
    res = config.download_data(download_name, 'myurl/d.h5', expected)
    assert res == download_path
    assert len(_downloads) == 0
    out, err = capsys.readouterr()
    assert out == ''

    # Attempting to download a second time with no hash should print the hash
    _download_data[:] = [('hello world', None)]
    res = config.download_data(download_name, 'myurl/d.h5', None)
    assert res == download_path
    assert len(_downloads) == 0
    out, err = capsys.readouterr()
    assert out == 'The SHA-256 of {} is "{}"\n'.format(download_path,
                                                       expected)

    os.remove(download_path)

    # Now test for download errors
    _download_data[:] = [
        ('', RuntimeError),            # Raise an exception on the 1st try
        ('goodbye world', None),       # Wrong data (fail verification) on 2nd
        ('hello world', None),
    ]
    res = config.download_data(download_name, 'myurl/d.h5', expected)
    out, err = capsys.readouterr()
    line1 = ('Download of {} unsuccessful; error {}; '
             'deleting and re-trying...'.format('myurl/d.h5', RuntimeError()))
    line2 = ('Download of {} unsuccessful; verification failed; '
             'deleting and re-trying...'.format('myurl/d.h5'))
    assert res == download_path
    assert len(_downloads) == 3
    assert out == '{}\n{}\n'.format(line1, line2)
    del _downloads[:]

    os.remove(download_path)

    # Now test for complete failure
    _download_data[:] = [
        ('', RuntimeError),            # Raise an exception on the 1st try
        ('', RuntimeError),            # Raise an exception on the 2nd try
        ('goodbye world', None),       # Wrong data (fail verification) on 2nd
    ]
    res = config.download_data(download_name, 'myurl/d.h5', expected)
    out, err = capsys.readouterr()
    line1 = ('Download of {} unsuccessful; error {}; '
             'deleting and re-trying...'.format('myurl/d.h5', RuntimeError()))
    line2 = ('Download of {} unsuccessful; error {}; '
             'deleting and re-trying...'.format('myurl/d.h5', RuntimeError()))
    line3 = ('Download of {} unsuccessful; verification failed; '
             'deleting and re-trying...'.format('myurl/d.h5'))
    line4 = ('Did not succeed in downloading {} (tried {} times)'.format(
        'myurl/d.h5', config._MAX_DOWNLOAD_TRIES))
    assert res is None
    assert len(_downloads) == 3
    assert out == '{}\n{}\n{}\n{}\n'.format(line1, line2, line3, line4)

    shutil.rmtree(tdir)


def test_copy_data(monkeypatch, capsys):
    # copy_data / copy_and_verify mirror download_data but source from a
    # local file via shutil.copy (patched here to record calls).
    import os
    import tempfile
    import shutil
    import hashlib
    from batchup import config

    _copy = shutil.copy
    _copies = []

    def copy_patch(source, dest):
        _copy(source, dest)
        _copies.append((source, dest))

    tdir = tempfile.mkdtemp()
    _setup_batchup_path(monkeypatch, tdir)
    monkeypatch.setattr(shutil, 'copy', copy_patch)

    # Good and bad expected hashes
    hasher = hashlib.sha256()
    hasher.update(b'hello world')
    expected = hasher.hexdigest()
    bad_hasher = hashlib.sha256()
    bad_hasher.update(b'goodbye world')
    bad_expected = bad_hasher.hexdigest()

    source_path = os.path.join(tdir, 'source.txt')
    dest_name = 'dest.txt'
    dest_path = config.get_data_path(dest_name)
    with open(source_path, 'w') as f:
        f.write('hello world')

    # Successful download on first time
    res = config.copy_data(dest_name, source_path, expected)
    out, err = capsys.readouterr()
    assert out == ''
    assert res == dest_path
    assert len(_copies) == 1
    assert _copies[0] == (source_path, dest_path + '.unverified')
    assert os.path.exists(dest_path)
    assert open(dest_path, 'r').read() == 'hello world'
    out, err = capsys.readouterr()
    assert out == ''
    del _copies[:]

    # Attempting to download a second time should exit
    res = config.copy_and_verify(dest_path, source_path, expected)
    assert res == dest_path
    assert len(_copies) == 0
    out, err = capsys.readouterr()
    assert out == ''

    # Attempting to download a second time with no hash should print the hash
    res = config.copy_and_verify(dest_path, source_path, None)
    assert res == dest_path
    assert len(_copies) == 0
    out, err = capsys.readouterr()
    assert out == 'The SHA-256 of {} is "{}"\n'.format(dest_path, expected)

    os.remove(dest_path)
    del _copies[:]

    # Try failing verification
    res = config.copy_and_verify(dest_path, source_path, bad_expected)
    out, err = capsys.readouterr()
    assert out == 'SHA verification of file {} failed\n'.format(source_path)
    assert res is None
    assert len(_copies) == 1
    assert _copies[0] == (source_path, dest_path + '.unverified')
    assert not os.path.exists(dest_path)
    del _copies[:]

    shutil.rmtree(tdir)
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-01 23:15
# NOTE(review): this looks like a squashed/consolidated auto-generated
# migration (an initial CreateModel set followed by many Alter/Add/Remove
# operations on the same models) — do not hand-edit operation contents.
import autoslug.fields
import common.utils
import datetime
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import open_humans.storage
import private_sharing.models


class Migration(migrations.Migration):

    initial = True

    # NOTE(review): 'open_humans' appears twice below (0003 and 0004);
    # Django tolerates this, but confirm both entries are intentional.
    dependencies = [
        ('open_humans', '0003_auto_20151223_1827'),
        ('oauth2_provider', '__first__'),
        ('open_humans', '0004_member_badges'),
    ]

    operations = [
        migrations.CreateModel(
            name='DataRequestProject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_study', models.BooleanField(choices=[(True, 'Study'), (False, 'Activity')], help_text='A "study" is doing human subjects research and must have Institutional Review Board approval or equivalent ethics board oversight. Activities can be anything else, e.g. data visualizations.', verbose_name='Is this project a study or an activity?')),
                ('name', models.CharField(max_length=100, verbose_name='Project name')),
                ('leader', models.CharField(max_length=100, verbose_name='Leader(s) or principal investigator(s)')),
                ('organization', models.CharField(max_length=100, verbose_name='Organization or institution')),
                ('contact_email', models.EmailField(max_length=254, verbose_name='Contact email for your project')),
                ('info_url', models.URLField(verbose_name='URL for general information about your project')),
                ('short_description', models.CharField(max_length=140, verbose_name='A short description')),
                ('long_description', models.TextField(max_length=1000, verbose_name='A long description')),
                ('active', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active,\nit won\'t show up in listings, and new data sharing authorizations cannot occur.\nProjects which are "active" but not approved may have some information shared\nin an "In Development" section, so Open Humans members can see potential\nupcoming studies.')),
                ('badge_image', models.ImageField(blank=True, help_text="A badge that will be displayed on the user's profile once they've connected your project.", max_length=1024, storage=open_humans.storage.PublicStorage(), upload_to=private_sharing.models.badge_upload_path)),
                ('request_sources_access', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to")),
                ('request_message_permission', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], help_text='Permission to send messages to the member. This does not grant access to their email address.', verbose_name='Are you requesting permission to message users?')),
                ('request_username_access', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], help_text="Access to the member's username. This implicitly enables access to anything the user is publicly sharing on Open Humans. Note that this is potentially sensitive and/or identifying.", verbose_name='Are you requesting Open Humans usernames?')),
                ('approved', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('api_access_secret', models.CharField(max_length=64)),
            ],
            options={
                'verbose_name_plural': 'Data request activities',
            },
        ),
        migrations.CreateModel(
            name='DataRequestProjectMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id_code', models.CharField(max_length=16)),
                ('message_permission', models.BooleanField()),
                ('username_shared', models.BooleanField()),
                ('sources_shared', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), size=None)),
                ('member', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member')),
            ],
        ),
        migrations.CreateModel(
            name='OAuth2DataRequestProject',
            fields=[
                ('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')),
                ('enrollment_url', models.URLField(help_text="The URL we direct members to if they're interested in sharing data with your project.", verbose_name='Enrollment URL')),
                ('redirect_url', models.CharField(help_text='The return URL for our "authorization code" OAuth2 grant\n process. You can <a target="_blank" href="">read more about OAuth2\n "authorization code" transactions here</a>.', max_length=256, verbose_name='Redirect URL')),
                ('application', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL)),
            ],
            options={
                'verbose_name': 'OAuth2 data request project',
            },
            bases=('private_sharing.datarequestproject',),
        ),
        migrations.CreateModel(
            name='OnSiteDataRequestProject',
            fields=[
                ('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')),
                ('consent_text', models.TextField(help_text='The "informed consent" text that describes your project to Open Humans members.')),
                ('post_sharing_url', models.URLField(blank=True, help_text='If provided, after authorizing sharing the\nmember will be taken to this URL. If this URL includes "PROJECT_MEMBER_ID"\nwithin it, we will replace that with the member\'s project-specific\nproject_member_id. This allows you to direct them to an external survey you\noperate (e.g. using Google Forms) where a pre-filled project_member_id field\nallows you to connect those responses to corresponding data in Open Humans.', verbose_name='Post-sharing URL')),
            ],
            options={
                'verbose_name': 'On-site data request project',
            },
            bases=('private_sharing.datarequestproject',),
        ),
        migrations.AddField(
            model_name='datarequestprojectmember',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_members', to='private_sharing.DataRequestProject'),
        ),
        migrations.AddField(
            model_name='datarequestproject',
            name='coordinator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='open_humans.Member'),
        ),
        migrations.AlterField(
            model_name='datarequestproject',
            name='long_description',
            field=models.TextField(max_length=1000, verbose_name='A long description (1000 characters max)'),
        ),
        migrations.AlterField(
            model_name='datarequestproject',
            name='short_description',
            field=models.CharField(max_length=140, verbose_name='A short description (140 characters max)'),
        ),
        migrations.AlterField(
            model_name='datarequestprojectmember',
            name='member',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member'),
        ),
        migrations.RenameField(
            model_name='datarequestprojectmember',
            old_name='user_id_code',
            new_name='project_member_id',
        ),
        migrations.AlterField(
            model_name='datarequestprojectmember',
            name='project_member_id',
            field=models.CharField(max_length=16, unique=True),
        ),
        migrations.AlterField(
            model_name='datarequestproject',
            name='request_sources_access',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=list, help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to"),
        ),
        migrations.AlterField(
            model_name='datarequestproject',
            name='active',
            field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active, it\nwon\'t show up in listings, and new data sharing authorizations cannot occur.\nProjects which are "active" but not approved may have some information shared\nin an "In Development" section, so Open Humans members can see potential\nupcoming studies.'),
        ),
        migrations.AddField(
            model_name='datarequestprojectmember',
            name='created',
            field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 3, 4, 5, 14, 50, 931889, tzinfo=utc)),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='datarequestprojectmember',
            name='message_permission',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='datarequestprojectmember',
            name='sources_shared',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=list, size=None),
        ),
        migrations.AlterField(
            model_name='datarequestprojectmember',
            name='username_shared',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='datarequestproject',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True),
        ),
        migrations.AddField(
            model_name='datarequestprojectmember',
            name='revoked',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterModelOptions(
            name='datarequestproject',
            options={},
        ),
        migrations.AddField(
            model_name='datarequestprojectmember',
            name='authorized',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='datarequestproject',
            name='slug',
            field=autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='name', unique=True),
        ),
        migrations.AddField(
            model_name='datarequestproject',
            name='is_academic_or_nonprofit',
            field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=False, verbose_name='Is this institution or organization an academic institution or non-profit organization?'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='datarequestprojectmember',
            name='consent_text',
            field=models.TextField(blank=True),
        ),
        migrations.RemoveField(
            model_name='datarequestproject',
            name='api_access_secret',
        ),
        migrations.AddField(
            model_name='datarequestproject',
            name='master_access_token',
            field=models.CharField(default=common.utils.generate_id, max_length=64),
        ),
        migrations.AddField(
            model_name='datarequestprojectmember',
            name='joined',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='datarequestproject',
            name='request_sources_access',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to"),
        ),
        migrations.AlterField(
            model_name='datarequestproject',
            name='organization',
            field=models.CharField(blank=True, max_length=100, verbose_name='Organization or institution'),
        ),
        migrations.AddField(
            model_name='datarequestproject',
            name='returned_data_description',
            field=models.CharField(blank=True, help_text="Leave this blank if your project doesn't plan to add or return new data for your members.", max_length=140, verbose_name='Description of data you plan to upload to member accounts (140 characters max)'),
        ),
        migrations.AlterField(
            model_name='datarequestproject',
            name='active',
            field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active, it\nwon\'t show up in listings of activities that can be joined by participants, and\nnew data sharing authorizations cannot occur. Projects which are "active" but\nnot approved may have some information shared in an "In Development" section,\nso Open Humans members can see potential upcoming studies. Removing "active"\nstatus from a project will not remove any uploaded files from a project\nmember\'s profile.'),
        ),
        migrations.AddField(
            model_name='datarequestproject',
            name='token_expiration_date',
            field=models.DateTimeField(default=private_sharing.models.now_plus_24_hours),
        ),
        migrations.AddField(
            model_name='datarequestproject',
            name='token_expiration_disabled',
            field=models.BooleanField(default=False),
        ),
    ]
from collections import defaultdict
import os
import json
from logger import Logger
from util import uncamel
#Future proof a bit
try:
    #Python 2
    from ConfigParser import SafeConfigParser as ConfigParser
except ImportError:
    #Python 3
    from configparser import ConfigParser


class GameObjectMeta(type):
    """Metaclass for game objects.

    Fills in ``_name``/``_plural`` from the class name, turns the names in
    ``_relations`` into id-backed properties and the ``_remotes`` mapping
    into read-only forwarding properties, then registers the new class in
    its game's ``_object_types``.
    """
    def __new__(meta, name, bases, dct):
        if '_name' not in dct:
            dct['_name'] = uncamel(name)
        if '_plural' not in dct:
            # Naive pluralisation: just append 's'.
            dct['_plural'] = dct['_name'] + 's'
        if '_relations' in dct:
            for key in dct['_relations']:
                # ``key=key`` binds the loop variable per property
                # (avoids the late-binding closure pitfall).
                @property
                def getter(self, key=key):
                    id = getattr(self, key+'_id')
                    return self.game.objects.get(id, None)

                @getter.setter
                def getter(self, value, key=key):
                    return setattr(self, key+'_id', value.id)
                dct[key] = getter
        if '_remotes' in dct:
            # A remote attribute reads ``key`` off the related object
            # named by ``source`` (read-only).
            for key, source in dct['_remotes'].items():
                @property
                def getter(self, key=key, source=source):
                    return getattr(getattr(self, source), key)
                dct[key] = getter
        cls = type.__new__(meta, name, bases, dct)
        #record the type in its game
        cls._game._object_types[name] = cls
        return cls


class GameObject(object):
    # Root game object
    # _game_state_attributes: names whose changes are tracked in
    # game.changes and serialized by jsonize(); subclasses override it.
    _game_state_attributes = set()

    def __init__(self, game, **attributes):
        #Bypass the __setattr__ method when setting the game
        object.__setattr__(self, 'game', game)
        # While _new is True, __setattr__ does not record changes.
        self._new = True
        for key in self._game_state_attributes:
            setattr(self, key, None)
        #And the initial id, so id is defined
        object.__setattr__(self, 'id', game.next_id())
        game.add_object(self)
        self.removed = False
        # Only recognised game-state attributes may be set from kwargs.
        for key, value in attributes.items():
            if key in self._game_state_attributes:
                setattr(self, key, value)

    def __setattr__(self, name, value):
        #We need to record changes for the game logs
        old = getattr(self, name, None)
        object.__setattr__(self, name, value)
        # Record only real changes to tracked attributes of live,
        # already-initialised objects.
        if self.game and \
           not self._new and \
           self.id in self.game.objects and \
           name in self._game_state_attributes and \
           old != value:
            self.game.changes[self.id][name] = value

    def jsonize(self):
        # Snapshot of all tracked attributes plus the id.
        attributes = dict((key, getattr(self, key))
                          for key in self._game_state_attributes)
        attributes['id'] = self.id
        return attributes

    def remove(self):
        # Deleting from game.objects also queues a 'remove' change
        # (see ObjectHolder.__delitem__).
        self.removed = True
        del self.game.objects[self.id]

    def before_turn(self):
        pass

    def after_turn(self):
        pass


class GameMeta(type):
    """Metaclass for Game subclasses.

    Adds id-backed properties for ``_relations`` (resolved against the
    game's own ``objects``) and gives each game class its own
    ``_object_types`` registry plus a per-game ``Object`` base class.
    """
    def __new__(meta, name, bases, dct):
        if '_relations' in dct:
            for key in dct['_relations']:
                @property
                def getter(self, key=key):
                    id = getattr(self, key+'_id')
                    return self.objects.get(id, None)

                @getter.setter
                def getter(self, value, key=key):
                    return setattr(self, key+'_id', value.id)
                dct[key] = getter
        cls = type.__new__(meta, name, bases, dct)
        cls._object_types = {}

        class Object(GameObject):
            _name = 'game_object'
            _game = cls
            #GameObject can't have the metaclass because it has no game
            # NOTE(review): ``__metaclass__`` only takes effect on Python 2;
            # on Python 3 this assignment is inert — confirm target version.
            __metaclass__ = GameObjectMeta
        cls.Object = Object
        return cls


class Game(object):
    _object_types = {}
    # _globals: attribute names whose assignments are mirrored into
    # global_changes by __setattr__ (flushed to clients).
    _globals = []
    # NOTE(review): Python 2 style metaclass declaration (see above).
    __metaclass__ = GameMeta
    start_time = 10
    time_inc = 1
    # Shell game to show interaction

    def __init__(self, details):
        self.highest_id = -1
        # Pending deltas accumulated between flush() calls.
        self.additions = []
        self.changes = defaultdict(dict)
        self.global_changes = {}
        self.removals = []
        self.current_player = None
        self.objects = ObjectHolder(self)
        self.connections = []
        self.state = 'new'
        self.details = details
        self.game_name = details['game_name']
        self.logger = Logger(self)
        #convenience so the utilities for game objects also work on games
        self.game = self
        for i in self._globals:
            setattr(self, i, None)

    def add_object(self, object):
        # Queue the object as an addition and index it by id.
        self.additions.append(object)
        self.objects.add(object)

    def next_id(self):
        self.highest_id += 1
        return self.highest_id

    def send_all(self, message):
        # Broadcast to every connection and mirror to the game log.
        for i in self.connections:
            i.connection.send_json(message)
        #TODO: Server-side glog
        self.logger.write(message)

    def flush(self):
        """Drain pending additions/changes/removals into one 'changes'
        message, broadcast it, and reset the delta buffers."""
        output = []
        for added in self.additions:
            output.append({'action':'add', 'values': added.jsonize(),
                           'type': added.__class__.__name__})
            # From now on the object's attribute changes are tracked.
            added._new = False
        for id, values in self.changes.items():
            output.append({'action': 'update', 'id': id, 'values': values})
        if self.global_changes:
            output.append({'action':'global_update',
                           'values': self.global_changes})
        for removed in self.removals:
            output.append({'action': 'remove', 'id': removed.id})
        self.additions = []
        self.changes = defaultdict(dict)
        self.global_changes = {}
        self.removals = []
        if not output:
            return True
        message = {'type': 'changes', 'args': {'changes': output}}
        self.send_all(message)
        return True

    def add_connection(self, connection, details):
        # Only joinable before the game starts; the game starts itself as
        # soon as two connections are present (two-player game).
        if self.state != 'new':
            return False
        self.connections.append(connection)
        if len(self.connections) == 2:
            self.start()
        return True

    def remove_connection(self, connection):
        # A disconnect during a running game forfeits to the other player.
        if self.state == 'running':
            players = [i for i in self.players
                       if i._connection is connection]
            if len(players) == 1:
                player = players[0]
                other = self.players[1 - player.id]
                self.end_game(other, 'disconnect')
        if connection in self.connections:
            self.connections.remove(connection)

    def start(self):
        self.state = 'running'
        for i in self.connections:
            # 'Player' is expected to be registered by the game plugin.
            Player = self._object_types['Player']
            player = Player(self, name = i.connection.username)
            player.time = self.start_time
            #Link the player to the connection, so we can easily associate them
            player._connection = i
            i.send_json({'type': 'player_id', 'args': {'id': player.id}})
        self.turn_number = -1
        self.before_start()
        self.flush()
        self.send_all({'type': 'start_game'})
        self.start_turn()

    def start_turn(self):
        # Alternate players by turn parity; give the current player their
        # time increment, run hooks, then announce the turn.
        self.turn_number += 1
        self.player_id = self.turn_number % 2
        self.current_player = self.players[self.player_id]
        self.current_player.time += self.time_inc
        self.before_turn()
        for i in self.objects.values():
            i.before_turn()
        self.flush()
        self.send_all({'type': 'start_turn'})

    def end_turn(self):
        self.send_all({'type': 'end_turn'})
        self.after_turn()
        for i in self.objects.values():
            i.after_turn()
        self.flush()
        # check_winner is expected from the subclass: (winner, reason).
        winner, reason = self.check_winner()
        if winner:
            self.end_game(winner, reason)
        else:
            self.start_turn()

    def end_game(self, winner, reason):
        self.state = 'over'
        self.winner = winner.id
        self.send_all({'type': 'game_over',
                       'args': {'winner': winner.id, 'reason': reason}})
        self.objects.clear()
        self.logger.close()

    def __setattr__(self, key, value):
        object.__setattr__(self, key, value)
        # Mirror declared global attributes into the pending delta set.
        if key in self._globals:
            self.global_changes[key] = value

    def load_config(self, name):
        """Load plugins/<game>/config/<name>[.cfg] as a dict of dicts;
        values that parse as JSON are decoded, others stay strings."""
        def parse(value):
            try:
                return json.loads(value)
            # NOTE(review): bare except — also swallows non-JSON errors.
            except:
                return value
        if '.' not in name:
            name += '.cfg'
        path = os.path.join('plugins', self._name, 'config', name)
        parser = ConfigParser()
        # Preserve option-name case (default optionxform lowercases).
        parser.optionxform = str
        parser.readfp(open(path))
        config = {key:parse(value)
                  for key, value in parser.items('DEFAULT')}
        for s in parser.sections():
            config[s] = {key:parse(value)
                         for key, value in parser.items(s)}
        return config


class ObjectHolder(dict):
    """Dict of id -> game object that also maintains per-type lists on the
    game (``game.<plural>``) and queues removals for the change log."""

    def __init__(self, game):
        dict.__init__(self)
        self.game = game
        for i in game._object_types.values():
            setattr(self.game, i._plural, [])

    def add(self, value):
        if not isinstance(value, self.game.Object):
            raise ValueError("Received object was not a game object")
        self[value.id] = value

    def clear(self):
        # Reset both the id index and every per-type list.
        dict.clear(self)
        for i in self.game._object_types.values():
            setattr(self.game, i._plural, [])

    def __setitem__(self, key, value):
        # Replacing an id first removes the old object so the per-type
        # lists stay consistent.
        if key in self:
            del self[key]
        dict.__setitem__(self, key, value)
        for name, cls in self.game._object_types.items():
            if isinstance(value, cls):
                getattr(self.game, cls._plural).append(value)

    def __delitem__(self, key):
        value = self[key]
        dict.__delitem__(self, key)
        # Queue the removal for the next flush().
        self.game.removals.append(value)
        for i in self.game._object_types.values():
            # NOTE(review): 'list' shadows the builtin in this scope.
            list = getattr(self.game, i._plural)
            if value in list:
                list.remove(value)
from django.core.management.base import BaseCommand, CommandError from api import models from pprint import pprint import urllib2, urllib import json import sys import datetime def imageURLToDatabase(URL): if URL: return URL.replace('http://i.schoolido.lu/', '') return None class Command(BaseCommand): can_import_settings = True def handle(self, *args, **options): if 'songs' in args: page_url = u'http://schoolido.lu/api/songs/?page_size=50&expand_event' while page_url is not None: response = urllib.urlopen(page_url) data = json.loads(response.read()) page_url = data['next'] for song in data['results']: data = { 'romaji_name': song['romaji_name'], 'translated_name': song['translated_name'], 'attribute': song['attribute'], 'BPM': song['BPM'], 'time': song['time'], 'main_unit': song['main_unit'], 'event': models.Event.objects.get(japanese_name=song['event']['japanese_name']) if song['event'] else None, 'rank': song['rank'], 'daily_rotation': song['daily_rotation'], 'daily_rotation_position': song['daily_rotation_position'], 'image': imageURLToDatabase(song['image']), 'easy_difficulty': song['easy_difficulty'], 'easy_notes': song['easy_notes'], 'normal_difficulty': song['normal_difficulty'], 'normal_notes': song['normal_notes'], 'hard_difficulty': song['hard_difficulty'], 'hard_notes': song['hard_notes'], 'expert_difficulty': song['expert_difficulty'], 'expert_random_difficulty': song['expert_random_difficulty'], 'expert_notes': song['expert_notes'], 'master_difficulty': song['master_difficulty'], 'master_notes': song['master_notes'], 'available': song['available'], 'itunes_id': song['itunes_id'], } print u'======== Song {} ========'.format(song['name']) pprint(data) models.Song.objects.update_or_create(name=song['name'], defaults=data) return if 'idols' in args: page_url = u'http://schoolido.lu/api/idols/?page_size=50' while page_url is not None: response = urllib.urlopen(page_url) data = json.loads(response.read()) page_url = data['next'] for idol in data['results']: 
data = { 'japanese_name': idol['japanese_name'], 'main': idol['main'], 'main_unit': idol['main_unit'], 'sub_unit': idol['sub_unit'], 'age': idol['age'], 'school': idol['school'], 'birthday': datetime.datetime.strptime(idol['birthday'], '%m-%d').date() if idol['birthday'] else None, 'astrological_sign': idol['astrological_sign'], 'blood': idol['blood'], 'height': idol['height'], 'measurements': idol['measurements'], 'favorite_food': idol['favorite_food'], 'least_favorite_food': idol['least_favorite_food'], 'hobbies': idol['hobbies'], 'attribute': idol['attribute'], 'year': idol['year'], 'cv': idol['cv']['name'] if idol['cv'] else None, 'cv_url': idol['cv']['url'] if idol['cv'] else None, 'cv_nickname': idol['cv']['nickname'] if idol['cv'] else None, 'cv_twitter': idol['cv']['twitter'] if idol['cv'] else None, 'cv_instagram': idol['cv']['instagram'] if idol['cv'] else None, 'official_url': idol['official_url'], 'summary': idol['summary'], } print u'======== Idol {} ========'.format(idol['name']) pprint(data) models.Idol.objects.update_or_create(name=idol['name'], defaults=data) return if 'events' in args: page_url = u'http://schoolido.lu/api/events/?page_size=50' while page_url is not None: response = urllib.urlopen(page_url) data = json.loads(response.read()) page_url = data['next'] for event in data['results']: data = { 'romaji_name': event['romaji_name'], 'english_name': event['english_name'], 'english_t1_points': event['english_t1_points'], 'english_t1_rank': event['english_t1_rank'], 'english_t2_points': event['english_t2_points'], 'english_t2_rank': event['english_t2_rank'], 'japanese_t1_points': event['japanese_t1_points'], 'japanese_t1_rank': event['japanese_t1_rank'], 'japanese_t2_points': event['japanese_t2_points'], 'japanese_t2_rank': event['japanese_t2_rank'], 'note': event['note'], 'image': imageURLToDatabase(event['image']), 'english_image': imageURLToDatabase(event['english_image']), 'beginning': event['beginning'], 'end': event['end'], 
'english_beginning': event['english_beginning'], 'english_end': event['english_end'], 'english_name': event['english_name'], } print u'======== Event {} ========'.format(event['japanese_name']) pprint(data) models.Event.objects.update_or_create(japanese_name=event['japanese_name'], defaults=data) return if 'cards' in args: page_url = u'http://schoolido.lu/api/cards/?page_size=50&ordering=-id' while page_url is not None: response = urllib.urlopen(page_url) data = json.loads(response.read()) page_url = data['next'] for card in data['results']: data = {} data['idol'] = models.Idol.objects.get(name=card['idol']['name']) if card['event']: data['event'] = models.Event.objects.get(japanese_name=card['event']['japanese_name']) if card['event']: data['event'] = models.Event.objects.get(japanese_name=card['event']['japanese_name']) data['game_id'] = card['game_id'] data['japanese_collection'] = card['japanese_collection'] #data['english_collection'] = card['english_collection'] data['translated_collection'] = card['translated_collection'] data['rarity'] = card['rarity'] data['attribute'] = card['attribute'] data['is_promo'] = card['is_promo'] data['promo_item'] = card['promo_item'] data['promo_link'] = card['promo_link'] data['release_date'] = card['release_date'] data['is_special'] = card['is_special'] data['japan_only'] = card['japan_only'] #data['seal_shop'] = card['seal_shop'] data['hp'] = card['hp'] data['minimum_statistics_smile'] = card['minimum_statistics_smile'] data['minimum_statistics_pure'] = card['minimum_statistics_pure'] data['minimum_statistics_cool'] = card['minimum_statistics_cool'] data['non_idolized_maximum_statistics_smile'] = card['non_idolized_maximum_statistics_smile'] data['non_idolized_maximum_statistics_pure'] = card['non_idolized_maximum_statistics_pure'] data['non_idolized_maximum_statistics_cool'] = card['non_idolized_maximum_statistics_cool'] data['idolized_maximum_statistics_smile'] = card['idolized_maximum_statistics_smile'] 
data['idolized_maximum_statistics_pure'] = card['idolized_maximum_statistics_pure'] data['idolized_maximum_statistics_cool'] = card['idolized_maximum_statistics_cool'] data['skill'] = card['skill'] data['japanese_skill'] = card['japanese_skill'] data['skill_details'] = card['skill_details'] data['japanese_skill_details'] = card['japanese_skill_details'] data['center_skill'] = card['center_skill'] data['transparent_image'] = imageURLToDatabase(card['transparent_image']) data['transparent_idolized_image'] = imageURLToDatabase(card['transparent_idolized_image']) data['card_image'] = imageURLToDatabase(card['card_image']) data['card_idolized_image'] = imageURLToDatabase(card['card_idolized_image']) data['english_card_image'] = imageURLToDatabase(card['english_card_image']) data['english_card_idolized_image'] = imageURLToDatabase(card['english_card_idolized_image']) data['round_card_image'] = imageURLToDatabase(card['round_card_image']) data['round_card_idolized_image'] = imageURLToDatabase(card['round_card_idolized_image']) data['english_round_card_image'] = imageURLToDatabase(card['english_round_card_image']) data['english_round_card_idolized_image'] = imageURLToDatabase(card['english_round_card_idolized_image']) data['clean_ur'] = imageURLToDatabase(card['clean_ur']) data['clean_ur_idolized'] = imageURLToDatabase(card['clean_ur_idolized']) data['video_story'] = card['video_story'] data['japanese_video_story'] = card['japanese_video_story'] print '======== Card #{} ========'.format(card['id']) pprint(data) models.Card.objects.update_or_create(id=card['id'], defaults=data) return if 'ur_pairs' in args: page_url = u'http://schoolido.lu/api/cards/?page_size=50&rarity=UR' while page_url is not None: response = urllib.urlopen(page_url) data = json.loads(response.read()) page_url = data['next'] for card in data['results']: data = {} pprint(card) data['ur_pair'] = models.Card.objects.get(pk=card['ur_pair']['card']['id']) if card['ur_pair'] else None data['ur_pair_reverse'] = 
card['ur_pair']['reverse_display'] if card['ur_pair'] else False data['ur_pair_idolized_reverse'] = card['ur_pair']['reverse_display_idolized'] if card['ur_pair'] else False data['clean_ur'] = card['clean_ur'] data['clean_ur_idolized'] = imageURLToDatabase(card['clean_ur_idolized']) print '======== Card #{} ========'.format(card['id']) pprint(data) models.Card.objects.update_or_create(id=card['id'], defaults=data) return if 'imageURLs' in args: cards = models.Card.objects.all() for card in cards: card.card_idolized_image = 'cards/' + str(card.id) + 'idolized' + card.name.split(' ')[-1] + '.png' card.transparent_idolized_image = 'cards/transparent/' + str(card.id) + 'idolizedTransparent.png' card.round_card_idolized_image = 'cards/' + str(card.id) + 'RoundIdolized' + card.name.split(' ')[-1] + '.png' if not card.is_special and not card.is_promo: card.card_image = 'cards/' + str(card.id) + card.name.split(' ')[-1] + '.png' card.transparent_image = 'cards/transparent/' + str(card.id) + 'Transparent.png' card.round_card_image = 'cards/' + str(card.id) + 'Round' + card.name.split(' ')[-1] + '.png' else: card.card_image = None card.transparent_image = None card.round_card_image = None card.save() return
"""Tests for py_stringsimjoin's PositionFilter (filter_pair,
filter_tables, filter_candset and input validation).

Changes from the original: np.NaN -> np.nan (np.NaN was removed in
NumPy 2.0) and positional-axis DataFrame.drop -> drop(columns=...)
(the positional axis argument was removed in pandas 2.0).  Both
spellings are equivalent on all supported library versions.
"""
import unittest

from nose.tools import assert_equal, assert_list_equal, nottest, raises
from py_stringmatching.tokenizer.delimiter_tokenizer import DelimiterTokenizer
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
import numpy as np
import pandas as pd

from py_stringsimjoin.filter.position_filter import PositionFilter
from py_stringsimjoin.utils.converter import dataframe_column_to_str
from py_stringsimjoin.utils.generic_helper import remove_redundant_attrs


# test PositionFilter.filter_pair method
class FilterPairTestCases(unittest.TestCase):
    def setUp(self):
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.qg2 = QgramTokenizer(2)

    # tests for JACCARD measure
    def test_jac_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'cc dd', self.dlm, 'JACCARD',
                              0.8, False, False, True)

    def test_jac_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'aa cc dd ee', self.dlm,
                              'JACCARD', 0.8, False, False, False)

    # tests for COSINE measure
    def test_cos_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'cc dd', self.dlm, 'COSINE',
                              0.8, False, False, True)

    def test_cos_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'aa cc dd ee', self.dlm,
                              'COSINE', 0.8, False, False, False)

    # tests for DICE measure
    def test_dice_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'cc dd', self.dlm, 'DICE',
                              0.8, False, False, True)

    def test_dice_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'aa cc dd ee', self.dlm,
                              'DICE', 0.8, False, False, False)

    # tests for OVERLAP measure
    def test_overlap_dlm_2_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx yy zz ee', self.dlm,
                              'OVERLAP', 2, False, False, True)

    def test_overlap_dlm_2_pass(self):
        self.test_filter_pair('aa bb zz cc ee', 'xx yy zz ee', self.dlm,
                              'OVERLAP', 2, False, False, False)

    def test_overlap_dlm_empty(self):
        self.test_filter_pair('', '', self.dlm, 'OVERLAP', 1,
                              False, False, True)

    def test_overlap_dlm_empty_with_allow_empty(self):
        self.test_filter_pair('', '', self.dlm, 'OVERLAP', 1,
                              True, False, True)

    # tests for EDIT_DISTANCE measure
    def test_edit_dist_qg2_prune(self):
        self.test_filter_pair('1990', '1985', self.qg2, 'EDIT_DISTANCE', 1,
                              False, False, True)

    def test_edit_dist_qg2_pass(self):
        self.test_filter_pair('1890', '1995', self.qg2, 'EDIT_DISTANCE', 2,
                              False, False, False)

    def test_edit_dist_qg2_empty(self):
        self.test_filter_pair('', '', self.qg2, 'EDIT_DISTANCE', 1,
                              False, False, False)

    def test_edit_dist_qg2_empty_with_allow_empty(self):
        self.test_filter_pair('', '', self.qg2, 'EDIT_DISTANCE', 1,
                              True, False, False)

    def test_edit_dist_qg2_no_padding_empty(self):
        self.test_filter_pair('', '', QgramTokenizer(2, padding=False),
                              'EDIT_DISTANCE', 1, False, False, False)

    # test allow_missing flag
    def test_pos_filter_pass_missing_left(self):
        self.test_filter_pair(None, 'fg ty', self.dlm, 'DICE', 0.8,
                              False, True, False)

    def test_pos_filter_pass_missing_right(self):
        self.test_filter_pair('fg ty', np.nan, self.dlm, 'DICE', 0.8,
                              False, True, False)

    def test_pos_filter_pass_missing_both(self):
        self.test_filter_pair(None, np.nan, self.dlm, 'DICE', 0.8,
                              False, True, False)

    # tests for empty string input
    def test_empty_lstring(self):
        self.test_filter_pair('ab', '', self.dlm, 'JACCARD', 0.8,
                              False, False, True)

    def test_empty_rstring(self):
        self.test_filter_pair('', 'ab', self.dlm, 'JACCARD', 0.8,
                              False, False, True)

    def test_empty_strings(self):
        self.test_filter_pair('', '', self.dlm, 'JACCARD', 0.8,
                              False, False, True)

    def test_empty_strings_with_allow_empty(self):
        self.test_filter_pair('', '', self.dlm, 'JACCARD', 0.8,
                              True, False, False)

    @nottest
    def test_filter_pair(self, lstring, rstring, tokenizer, sim_measure_type,
                         threshold, allow_empty, allow_missing,
                         expected_output):
        # Shared driver: build the filter and check whether the pair is
        # pruned (True) or survives (False).
        position_filter = PositionFilter(tokenizer, sim_measure_type,
                                         threshold, allow_empty, allow_missing)
        actual_output = position_filter.filter_pair(lstring, rstring)
        assert_equal(actual_output, expected_output)


# test PositionFilter.filter_tables method
class FilterTablesTestCases(unittest.TestCase):
    def setUp(self):
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.A = pd.DataFrame([{'id': 1, 'attr':'ab cd ef aa bb'},
                               {'id': 2, 'attr':''},
                               {'id': 3, 'attr':'ab'},
                               {'id': 4, 'attr':'ll oo he'},
                               {'id': 5, 'attr':'xy xx zz fg'},
                               {'id': 6, 'attr':np.nan},
                               {'id': 7, 'attr':''}])
        self.B = pd.DataFrame([{'id': 1, 'attr':'zz fg xx'},
                               {'id': 2, 'attr':'he ll'},
                               {'id': 3, 'attr':'xy pl ou'},
                               {'id': 4, 'attr':'aa bb ab ef'},
                               {'id': 5, 'attr':'fg cd aa ef ab'},
                               {'id': 6, 'attr':None},
                               {'id': 7, 'attr':' '}])
        self.empty_table = pd.DataFrame(columns=['id', 'attr'])
        self.default_l_out_prefix = 'l_'
        self.default_r_out_prefix = 'r_'

    # tests for JACCARD measure
    def test_jac_dlm_08(self):
        expected_pairs = set(['1,4'])
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    def test_jac_dlm_08_with_out_attrs(self):
        expected_pairs = set(['1,4'])
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['id', 'attr'], ['id', 'attr']),
                                expected_pairs)

    def test_jac_dlm_08_with_out_prefix(self):
        expected_pairs = set(['1,4'])
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr'],
                                 'ltable.', 'rtable.'),
                                expected_pairs)

    # tests for COSINE measure
    def test_cos_dlm_08(self):
        expected_pairs = set(['1,5', '1,4', '4,2', '5,1'])
        self.test_filter_tables(self.dlm, 'COSINE', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    # tests for DICE measure
    def test_dice_dlm_08(self):
        expected_pairs = set(['1,5', '1,4', '4,2', '5,1'])
        self.test_filter_tables(self.dlm, 'DICE', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    # tests for OVERLAP measure
    def test_overlap_dlm_3(self):
        expected_pairs = set(['1,4', '1,5', '5,1'])
        self.test_filter_tables(self.dlm, 'OVERLAP', 3, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    # tests for EDIT_DISTANCE measure
    def test_edit_distance_qg2_2(self):
        A = pd.DataFrame([{'l_id': 1, 'l_attr':'19990'},
                          {'l_id': 2, 'l_attr':'200'},
                          {'l_id': 3, 'l_attr':'0'},
                          {'l_id': 4, 'l_attr':''},
                          {'l_id': 5, 'l_attr':np.nan}])
        B = pd.DataFrame([{'r_id': 1, 'r_attr':'200155'},
                          {'r_id': 2, 'r_attr':'190'},
                          {'r_id': 3, 'r_attr':'2010'},
                          {'r_id': 4, 'r_attr':''},
                          {'r_id': 5, 'r_attr':np.nan},
                          {'r_id': 6, 'r_attr':'18950'}])
        qg2_tok = QgramTokenizer(2)
        expected_pairs = set(['1,2', '1,6', '2,2', '2,3', '3,2', '4,4'])
        self.test_filter_tables(qg2_tok, 'EDIT_DISTANCE', 2, False, False,
                                (A, B, 'l_id', 'r_id', 'l_attr', 'r_attr'),
                                expected_pairs)

    # test allow_missing flag
    def test_jac_dlm_08_allow_missing(self):
        expected_pairs = set(['1,4', '6,1', '6,2', '6,3', '6,4', '6,5',
                              '6,6', '6,7', '1,6', '2,6', '3,6', '4,6',
                              '5,6', '7,6'])
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, True,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    # test allow_empty flag
    def test_jac_dlm_08_allow_empty(self):
        expected_pairs = set(['1,4', '2,7', '7,7'])
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, True, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    # test allow_empty flag with output attributes
    def test_jac_dlm_08_allow_empty_with_out_attrs(self):
        expected_pairs = set(['1,4', '2,7', '7,7'])
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, True, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr']),
                                expected_pairs)

    # test with n_jobs above 1
    def test_cos_dlm_08_with_njobs_above_1(self):
        expected_pairs = set(['1,5', '1,4', '4,2', '5,1'])
        self.test_filter_tables(self.dlm, 'COSINE', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr'],
                                 'ltable.', 'rtable.', 2),
                                expected_pairs)

    # test filter attribute of type int
    def test_jac_qg2_with_filter_attr_of_type_int(self):
        A = pd.DataFrame([{'l_id': 1, 'l_attr':1990},
                          {'l_id': 2, 'l_attr':2000},
                          {'l_id': 3, 'l_attr':0},
                          {'l_id': 4, 'l_attr':-1},
                          {'l_id': 5, 'l_attr':1986}])
        B = pd.DataFrame([{'r_id': 1, 'r_attr':2001},
                          {'r_id': 2, 'r_attr':1992},
                          {'r_id': 3, 'r_attr':1886},
                          {'r_id': 4, 'r_attr':2007},
                          {'r_id': 5, 'r_attr':2012}])
        dataframe_column_to_str(A, 'l_attr', inplace=True)
        dataframe_column_to_str(B, 'r_attr', inplace=True)
        qg2_tok = QgramTokenizer(2, return_set=True)
        expected_pairs = set(['1,2', '2,1', '2,4', '5,3'])
        self.test_filter_tables(qg2_tok, 'JACCARD', 0.3, False, False,
                                (A, B, 'l_id', 'r_id', 'l_attr', 'r_attr'),
                                expected_pairs)

    # tests for empty table input
    def test_empty_ltable(self):
        expected_pairs = set()
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.empty_table, self.B,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    def test_empty_rtable(self):
        expected_pairs = set()
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.A, self.empty_table,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    def test_empty_tables(self):
        expected_pairs = set()
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.empty_table, self.empty_table,
                                 'id', 'id', 'attr', 'attr'),
                                expected_pairs)

    @nottest
    def test_filter_tables(self, tokenizer, sim_measure_type, threshold,
                           allow_empty, allow_missing, args, expected_pairs):
        # Shared driver: run filter_tables, verify the candset schema and
        # the surviving (l_key, r_key) pairs.
        position_filter = PositionFilter(tokenizer, sim_measure_type,
                                         threshold, allow_empty, allow_missing)
        actual_candset = position_filter.filter_tables(*args)

        expected_output_attrs = ['_id']
        l_out_prefix = self.default_l_out_prefix
        r_out_prefix = self.default_r_out_prefix

        # Check for l_out_prefix in args.
        if len(args) > 8:
            l_out_prefix = args[8]
        expected_output_attrs.append(l_out_prefix + args[2])

        # Check for r_out_prefix in args.
        if len(args) > 9:
            r_out_prefix = args[9]
        expected_output_attrs.append(r_out_prefix + args[3])

        # Check for l_out_attrs in args.
        if len(args) > 6:
            if args[6]:
                l_out_attrs = remove_redundant_attrs(args[6], args[2])
                for attr in l_out_attrs:
                    expected_output_attrs.append(l_out_prefix + attr)

        # Check for r_out_attrs in args.
        if len(args) > 7:
            if args[7]:
                r_out_attrs = remove_redundant_attrs(args[7], args[3])
                for attr in r_out_attrs:
                    expected_output_attrs.append(r_out_prefix + attr)

        # verify whether the output table has the necessary attributes.
        assert_list_equal(list(actual_candset.columns.values),
                          expected_output_attrs)

        actual_pairs = set()
        for idx, row in actual_candset.iterrows():
            actual_pairs.add(','.join((str(row[l_out_prefix + args[2]]),
                                       str(row[r_out_prefix + args[3]]))))

        # verify whether the actual pairs and the expected pairs match.
        assert_equal(len(expected_pairs), len(actual_pairs))
        common_pairs = actual_pairs.intersection(expected_pairs)
        assert_equal(len(common_pairs), len(expected_pairs))


# test PositionFilter.filter_candset method
class FilterCandsetTestCases(unittest.TestCase):
    def setUp(self):
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.A = pd.DataFrame([{'l_id': 1, 'l_attr':'ab cd ef aa bb'},
                               {'l_id': 2, 'l_attr':''},
                               {'l_id': 3, 'l_attr':'ab'},
                               {'l_id': 4, 'l_attr':'ll oo he'},
                               {'l_id': 5, 'l_attr':'xy xx zz fg'},
                               {'l_id': 6, 'l_attr': np.nan}])
        self.B = pd.DataFrame([{'r_id': 1, 'r_attr':'zz fg xx'},
                               {'r_id': 2, 'r_attr':'he ll'},
                               {'r_id': 3, 'r_attr':'xy pl ou'},
                               {'r_id': 4, 'r_attr':'aa bb ab ef'},
                               {'r_id': 5, 'r_attr':'fg cd aa ef ab'},
                               {'r_id': 6, 'r_attr':None}])

        # generate cartesian product A x B to be used as candset
        self.A['tmp_join_key'] = 1
        self.B['tmp_join_key'] = 1
        self.C = pd.merge(self.A[['l_id', 'tmp_join_key']],
                          self.B[['r_id', 'tmp_join_key']],
                          on='tmp_join_key').drop(columns='tmp_join_key')

        self.empty_A = pd.DataFrame(columns=['l_id', 'l_attr'])
        self.empty_B = pd.DataFrame(columns=['r_id', 'r_attr'])
        self.empty_candset = pd.DataFrame(columns=['l_id', 'r_id'])

    # tests for JACCARD measure
    def test_jac_dlm_08(self):
        expected_pairs = set(['1,4'])
        self.test_filter_candset(self.dlm, 'JACCARD', 0.8, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for COSINE measure
    def test_cos_dlm_08(self):
        expected_pairs = set(['1,5', '1,4', '4,2', '5,1'])
        self.test_filter_candset(self.dlm, 'COSINE', 0.8, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for DICE measure
    def test_dice_dlm_08(self):
        expected_pairs = set(['1,5', '1,4', '4,2', '5,1'])
        self.test_filter_candset(self.dlm, 'DICE', 0.8, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # test allow_missing flag
    def test_jac_dlm_08_allow_missing(self):
        expected_pairs = set(['1,4', '6,1', '6,2', '6,3', '6,4', '6,5',
                              '6,6', '1,6', '2,6', '3,6', '4,6', '5,6'])
        self.test_filter_candset(self.dlm, 'JACCARD', 0.8, False, True,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for empty candset input
    def test_empty_candset(self):
        expected_pairs = set()
        self.test_filter_candset(self.dlm, 'JACCARD', 0.8, False, False,
                                 (self.empty_candset, 'l_id', 'r_id',
                                  self.empty_A, self.empty_B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    @nottest
    def test_filter_candset(self, tokenizer, sim_measure_type, threshold,
                            allow_empty, allow_missing, args, expected_pairs):
        # Shared driver: run filter_candset, verify the schema is preserved
        # and the surviving pairs match.
        position_filter = PositionFilter(tokenizer, sim_measure_type,
                                         threshold, allow_empty, allow_missing)
        actual_output_candset = position_filter.filter_candset(*args)

        # verify whether the output table has the necessary attributes.
        assert_list_equal(list(actual_output_candset.columns.values),
                          list(args[0].columns.values))

        actual_pairs = set()
        for idx, row in actual_output_candset.iterrows():
            actual_pairs.add(','.join((str(row[args[1]]), str(row[args[2]]))))

        # verify whether the actual pairs and the expected pairs match.
        assert_equal(len(expected_pairs), len(actual_pairs))
        common_pairs = actual_pairs.intersection(expected_pairs)
        assert_equal(len(common_pairs), len(expected_pairs))


class PositionFilterInvalidTestCases(unittest.TestCase):
    def setUp(self):
        self.A = pd.DataFrame([{'A.id':1, 'A.attr':'hello',
                                'A.int_attr':5}])
        self.B = pd.DataFrame([{'B.id':1, 'B.attr':'world',
                                'B.int_attr':6}])
        self.tokenizer = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.sim_measure_type = 'JACCARD'
        self.threshold = 0.8

    @raises(TypeError)
    def test_invalid_ltable(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables([], self.B, 'A.id', 'B.id',
                                      'A.attr', 'B.attr')

    @raises(TypeError)
    def test_invalid_rtable(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, [], 'A.id', 'B.id',
                                      'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_l_key_attr(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, self.B, 'A.invalid_id', 'B.id',
                                      'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_r_key_attr(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, self.B, 'A.id', 'B.invalid_id',
                                      'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_l_filter_attr(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                      'A.invalid_attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_r_filter_attr(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                      'A.attr', 'B.invalid_attr')

    @raises(AssertionError)
    def test_numeric_l_filter_attr(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                      'A.int_attr', 'B.attr')

    @raises(AssertionError)
    def test_numeric_r_filter_attr(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                      'A.attr', 'B.int_attr')

    @raises(AssertionError)
    def test_invalid_l_out_attr(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                      'A.attr', 'B.attr',
                                      ['A.invalid_attr'], ['B.attr'])

    @raises(AssertionError)
    def test_invalid_r_out_attr(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type,
                                         self.threshold)
        position_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                      'A.attr', 'B.attr',
                                      ['A.attr'], ['B.invalid_attr'])

    @raises(TypeError)
    def test_invalid_tokenizer(self):
        position_filter = PositionFilter([], self.sim_measure_type,
                                         self.threshold)

    @raises(AssertionError)
    def test_invalid_tokenizer_for_edit_distance(self):
        position_filter = PositionFilter(self.tokenizer, 'EDIT_DISTANCE', 2)

    @raises(TypeError)
    def test_invalid_sim_measure_type(self):
        position_filter = PositionFilter(self.tokenizer, 'INVALID_TYPE',
                                         self.threshold)

    @raises(AssertionError)
    def test_invalid_threshold(self):
        position_filter = PositionFilter(self.tokenizer,
                                         self.sim_measure_type, 1.2)
#
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#

from __future__ import absolute_import

from builtins import str, range
from past.builtins import basestring
from datetime import datetime
import six
import json
from time import time
import types
import unittest

import normalize.exc as exc
from normalize.coll import list_of
from normalize.record import Record
from normalize.visitor import VisitorPattern

from .testclasses import (
    acent, acent_attributes, JsonStarList, maia, NamedStarList,
    PullRequest, StarList, StarSystem, Wall, wall_one
)

# Types that json.dumps can serialize directly without conversion.
JSON_CAN_DUMP = (basestring, float, dict, list, type(None)) + six.integer_types


class SimpleDumper(VisitorPattern):
    """Minimal VisitorPattern subclass that converts values to
    JSON-serializable form (datetimes become ISO-8601 strings)."""
    # NOTE(review): classmethod's first parameter is conventionally
    # named 'cls'; 'self' is kept here to avoid a code change.
    @classmethod
    def apply(self, value, *args):
        if isinstance(value, JSON_CAN_DUMP):
            dumpable = value
        elif isinstance(value, datetime):
            dumpable = value.isoformat()
        else:
            raise Exception("Can't dump %r" % value)
        return dumpable


class AssertDiffTest(unittest.TestCase):
    def assertDiffs(self, a, b, expected, **kwargs):
        """Assert that diffing record a against b yields exactly the
        expected set of DiffInfo descriptions."""
        differences = set(str(x) for x in a.diff(b, **kwargs))
        self.assertEqual(
            differences,
            set("<DiffInfo: %s>" % x for x in expected)
        )


class TestVisitor(AssertDiffTest):
    """Round-trip tests for SimpleDumper.visit / .cast / .reflect over
    the shared fixture records from .testclasses."""

    def setUp(self):
        # Expected JSON form of the 'acent' fixture (Alpha Centauri).
        self.acent_json_data = {
            'name': 'Alpha Centauri',
            'components': [{'hip_id': 71683, 'name': 'Alpha Centauri A'},
                           {'hip_id': 71681, 'name': 'Alpha Centauri B'},
                           {'hip_id': 70890, 'name': 'Alpha Centauri C'}],
            'attributes': acent_attributes,
        }
        # Expected JSON form of a NamedStarList over the same components.
        self.nsl_json_data = {
            'name': 'Alpha Centauri',
            'values': self.acent_json_data['components']
        }

    def test_simple_dumper(self):
        # visit() should yield a plain, json.dumps-able structure that
        # round-trips back through cast()/the constructor without diffs.
        dumpable = SimpleDumper.visit(wall_one)
        self.assertIsInstance(dumpable['posts'][0], dict)
        self.assertEqual(dumpable['posts'][0]['edited'],
                         "2001-09-09T01:46:40")
        json.dumps(dumpable)  # assert doesn't throw
        wall_roundtripped = SimpleDumper.cast(Wall, dumpable)
        self.assertDiffs(wall_one, wall_roundtripped, {})
        self.assertDiffs(wall_one, Wall(dumpable), {})

    def test_intro_example_dump(self):
        dumped = SimpleDumper.visit(acent)
        self.assertEqual(dumped, self.acent_json_data)

    def test_intro_example_cast(self):
        self.assertDiffs(acent, StarSystem(self.acent_json_data), {})
        self.assertDiffs(
            acent, SimpleDumper.cast(StarSystem, self.acent_json_data), {},
        )

    def test_complex_dump(self):
        # A collection subclass with extra properties dumps both the
        # items ('values') and the extra property ('name').
        nsl = NamedStarList(acent.components)
        nsl.name = "Alpha Centauri"
        dumped = SimpleDumper.visit(nsl)
        self.assertEqual(dumped, self.nsl_json_data)

    def test_complex_dump2(self):
        dumped = SimpleDumper.visit(maia)
        maia2 = SimpleDumper.cast(type(maia), dumped)
        self.assertEqual(maia.diff(maia2), [])
        self.assertEqual(maia2.coordinates['ICRS'][2], "49.60656")
        self.assertEqual(maia2.designations['HR'], "1149")

    def test_complex_cast(self):
        nsl = NamedStarList(**(self.nsl_json_data))
        self.assertDiffs(
            nsl, SimpleDumper.cast(NamedStarList, self.nsl_json_data), {},
        )

    def test_dump_types(self):
        # reflect() describes the type's schema rather than a value.
        typeinfo = SimpleDumper.reflect(NamedStarList)
        self.assertEqual(
            typeinfo['itemtype']['properties']['hip_id']['type'], 'int',
        )
        typeinfo = SimpleDumper.reflect(Wall)
        self.assertEqual(typeinfo['properties']['owner']['name'], 'Person')
        self.assertEqual(
            typeinfo['properties']['owner']['properties']['interests']['type'],
            'list',
        )

    def test_json_dump(self):
        # A JsonRecord-flavored list dumps identically to the plain one.
        plain_list = StarList(self.acent_json_data['components'])
        json_list = JsonStarList(self.acent_json_data['components'])
        plain_dumped = SimpleDumper.visit(plain_list)
        json_dumped = SimpleDumper.visit(json_list)
        self.assertEqual(plain_dumped, json_dumped)

    def test_cast_garbage(self):
        # Inputs that cannot be grokked as a NamedStarList must raise.
        for garbage in (
            "green cheese", [], (), {'values': {"foo": "bar"}},
            self.acent_json_data['components'],
        ):
            with self.assertRaises(exc.VisitorGrokRecordError):
                SimpleDumper.cast(NamedStarList, garbage)

    def test_cast_complex_filtered(self):
        # this works because the properties are filtered out; normally this
        # filtering would be due to 'extraneous' property settings.
        # MultiFieldSelector doesn't currently distinguish between 'None' =>
        # all items in collection vs 'None' => all, so use a filter which
        # mentions each of the items in the set.
        nsl = SimpleDumper.cast(
            NamedStarList,
            self.acent_json_data['components'],
            visit_filter=tuple([x, 'hip_id'] for x in range(0, 3)),
        )
        self.assertEqual(len(nsl), 3)

    def test_visit_complex_filtered(self):
        # Filtering a visit keeps only the selected fields per item.
        nsl = NamedStarList(**(self.nsl_json_data))
        visited = SimpleDumper.visit(
            nsl, filter=tuple([x, 'hip_id'] for x in range(0, 3)),
        )
        self.assertEqual(
            visited,
            list(
                {'hip_id': x['hip_id']} for x in
                self.acent_json_data['components']
            ),
        )


class TestTypeUnionCases(AssertDiffTest):
    """Tests for properties typed as a union (e.g. datetime | None)."""

    def setUp(self):
        self.open_pr = PullRequest(number=123, merged_at=None)
        self.closed_pr = PullRequest(
            number=456,
            merged_at=datetime.fromtimestamp(time() - 20 * 86400),
        )

    def test_type_union_dump(self):
        # With ignore_none=False, a None-valued union member is emitted.
        # NOTE(review): assertRegexpMatches is the py2-era spelling kept
        # for this file's py2/py3 straddling (removed in Python 3.12).
        dumped = SimpleDumper.visit(self.open_pr, ignore_none=False)
        self.assertIn("created_at", dumped)
        self.assertRegexpMatches(
            dumped['created_at'], r'^\d{4}-\d{2}-\d{2}T.*',
        )
        self.assertEqual(dumped['merged_at'], None)
        dumped = SimpleDumper.visit(self.closed_pr)
        self.assertRegexpMatches(
            dumped['created_at'], r'^\d{4}-\d{2}-\d{2}T.*',
        )
        self.assertIn("created_at", dumped)
        self.assertIn('merged_at', dumped)

    def test_type_union_load(self):
        pr_dict = {
            "number": "5125",
            "created_at": "2014-07-23T12:34:56Z",
            "merged_at": None,
        }
        my_pr = PullRequest(pr_dict)
        pr_2 = SimpleDumper.cast(PullRequest, pr_dict, ignore_none=False)
        self.assertDiffs(my_pr, pr_2, {})

    def test_type_union_typeinfo(self):
        schema = SimpleDumper.reflect(PullRequest)
        self.assertEqual(schema['properties']['merged_at']['type'],
                         ["datetime", "NoneType"])

    def test_cast_collection(self):
        # Casting into a list_of(Record) produces the collection type with
        # Record instances inside; the empty list also casts cleanly.
        RecordList = list_of(Record)
        casted = VisitorPattern.cast(RecordList, [{}, {}])
        self.assertIsInstance(casted[0], Record)
        self.assertIsInstance(casted, RecordList)
        empty_casted = VisitorPattern.cast(RecordList, [])
        self.assertIsInstance(empty_casted, RecordList)
# flake8: noqa
# yapf: disable

# __import_lightning_begin__
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
from torch.nn import functional as F
from torchvision.datasets import MNIST
from torchvision import transforms
import os
# __import_lightning_end__

# __import_tune_begin__
import shutil
import tempfile
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.cloud_io import load as pl_load
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining
from ray.tune.integration.pytorch_lightning import TuneReportCallback, \
    TuneReportCheckpointCallback
# __import_tune_end__


# __lightning_begin__
class LightningMNISTClassifier(pl.LightningModule):
    """Three-layer MLP classifier for MNIST, configurable via a dict.

    ``config`` must contain "layer_1_size", "layer_2_size", "lr" and
    "batch_size"; these are the hyperparameters Tune searches over.

    This has been adapted from
    https://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09
    """

    def __init__(self, config, data_dir=None):
        super(LightningMNISTClassifier, self).__init__()

        self.data_dir = data_dir or os.getcwd()

        self.layer_1_size = config["layer_1_size"]
        self.layer_2_size = config["layer_2_size"]
        self.lr = config["lr"]
        self.batch_size = config["batch_size"]

        # mnist images are (1, 28, 28) (channels, width, height)
        self.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)
        self.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)
        self.layer_3 = torch.nn.Linear(self.layer_2_size, 10)

    def forward(self, x):
        """Flatten the image batch and return per-class log-probabilities."""
        batch_size, channels, width, height = x.size()
        x = x.view(batch_size, -1)

        x = self.layer_1(x)
        x = torch.relu(x)

        x = self.layer_2(x)
        x = torch.relu(x)

        x = self.layer_3(x)
        x = torch.log_softmax(x, dim=1)

        return x

    def cross_entropy_loss(self, logits, labels):
        # NLL on log-softmax output == cross entropy
        return F.nll_loss(logits, labels)

    def accuracy(self, logits, labels):
        """Fraction of correct argmax predictions for one batch."""
        _, predicted = torch.max(logits.data, 1)
        correct = (predicted == labels).sum().item()
        accuracy = correct / len(labels)
        return torch.tensor(accuracy)

    def training_step(self, train_batch, batch_idx):
        x, y = train_batch
        logits = self.forward(x)
        loss = self.cross_entropy_loss(logits, y)
        accuracy = self.accuracy(logits, y)

        self.log("ptl/train_loss", loss)
        self.log("ptl/train_accuracy", accuracy)
        return loss

    def validation_step(self, val_batch, batch_idx):
        x, y = val_batch
        logits = self.forward(x)
        loss = self.cross_entropy_loss(logits, y)
        accuracy = self.accuracy(logits, y)
        return {"val_loss": loss, "val_accuracy": accuracy}

    def validation_epoch_end(self, outputs):
        # Average per-batch metrics; these keys are what the Tune callbacks
        # report on "validation_end".
        avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
        avg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()
        self.log("ptl/val_loss", avg_loss)
        self.log("ptl/val_accuracy", avg_acc)

    @staticmethod
    def download_data(data_dir):
        """Download MNIST into *data_dir* and return the train dataset."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])
        return MNIST(data_dir, train=True, download=True, transform=transform)

    def prepare_data(self):
        mnist_train = self.download_data(self.data_dir)
        self.mnist_train, self.mnist_val = random_split(
            mnist_train, [55000, 5000])

    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=int(self.batch_size))

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=int(self.batch_size))

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        return optimizer


def train_mnist(config):
    """Train the classifier once, without Tune (plain Lightning)."""
    model = LightningMNISTClassifier(config)
    # FIX: the original passed ``show_progress_bar=False``, which is not a
    # valid pl.Trainer argument in the Lightning versions targeted by the
    # rest of this file (they all use ``progress_bar_refresh_rate=0``) and
    # would raise a TypeError.  Use the same argument here for consistency.
    trainer = pl.Trainer(max_epochs=10, progress_bar_refresh_rate=0)

    trainer.fit(model)
# __lightning_end__


# __tune_train_begin__
def train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):
    """Tune trainable: train once and report val metrics via callback."""
    model = LightningMNISTClassifier(config, data_dir)
    trainer = pl.Trainer(
        max_epochs=num_epochs,
        gpus=num_gpus,
        # log into the trial directory so TensorBoard groups runs per trial
        logger=TensorBoardLogger(
            save_dir=tune.get_trial_dir(), name="", version="."),
        progress_bar_refresh_rate=0,
        callbacks=[
            TuneReportCallback(
                {
                    "loss": "ptl/val_loss",
                    "mean_accuracy": "ptl/val_accuracy"
                },
                on="validation_end")
        ])
    trainer.fit(model)
# __tune_train_end__
# __tune_train_checkpoint_begin__
def train_mnist_tune_checkpoint(config,
                                checkpoint_dir=None,
                                data_dir=None,
                                num_epochs=10,
                                num_gpus=0):
    # Tune trainable with checkpointing: the TuneReportCheckpointCallback
    # both reports metrics and saves a "checkpoint" file on validation_end,
    # which PBT uses to restore exploited trials.
    trainer = pl.Trainer(
        max_epochs=num_epochs,
        gpus=num_gpus,
        logger=TensorBoardLogger(
            save_dir=tune.get_trial_dir(), name="", version="."),
        progress_bar_refresh_rate=0,
        callbacks=[
            TuneReportCheckpointCallback(
                metrics={
                    "loss": "ptl/val_loss",
                    "mean_accuracy": "ptl/val_accuracy"
                },
                filename="checkpoint",
                on="validation_end")
        ])
    if checkpoint_dir:
        # Currently, this leads to errors:
        # model = LightningMNISTClassifier.load_from_checkpoint(
        #     os.path.join(checkpoint, "checkpoint"))
        # Workaround:
        # load the raw checkpoint dict onto CPU and rebuild the model state
        # by hand, then resume the epoch counter from the checkpoint.
        ckpt = pl_load(
            os.path.join(checkpoint_dir, "checkpoint"),
            map_location=lambda storage, loc: storage)
        model = LightningMNISTClassifier._load_model_state(
            ckpt, config=config, data_dir=data_dir)
        trainer.current_epoch = ckpt["epoch"]
    else:
        model = LightningMNISTClassifier(config=config, data_dir=data_dir)

    trainer.fit(model)
# __tune_train_checkpoint_end__


# __tune_asha_begin__
def tune_mnist_asha(num_samples=10, num_epochs=10, gpus_per_trial=0):
    """Search hyperparameters with the ASHA early-stopping scheduler."""
    # download once up front so concurrent trials don't race on the download
    data_dir = os.path.join(tempfile.gettempdir(), "mnist_data_")
    LightningMNISTClassifier.download_data(data_dir)

    config = {
        "layer_1_size": tune.choice([32, 64, 128]),
        "layer_2_size": tune.choice([64, 128, 256]),
        "lr": tune.loguniform(1e-4, 1e-1),
        "batch_size": tune.choice([32, 64, 128]),
    }

    scheduler = ASHAScheduler(
        max_t=num_epochs,
        grace_period=1,
        reduction_factor=2)

    reporter = CLIReporter(
        parameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],
        metric_columns=["loss", "mean_accuracy", "training_iteration"])

    analysis = tune.run(
        tune.with_parameters(
            train_mnist_tune,
            data_dir=data_dir,
            num_epochs=num_epochs,
            num_gpus=gpus_per_trial),
        resources_per_trial={
            "cpu": 1,
            "gpu": gpus_per_trial
        },
        metric="loss",
        mode="min",
        config=config,
        num_samples=num_samples,
        scheduler=scheduler,
        progress_reporter=reporter,
        name="tune_mnist_asha")

    print("Best hyperparameters found were: ", analysis.best_config)

    shutil.rmtree(data_dir)
# __tune_asha_end__


# __tune_pbt_begin__
def tune_mnist_pbt(num_samples=10, num_epochs=10, gpus_per_trial=0):
    """Tune lr/batch_size with Population Based Training (needs checkpoints)."""
    data_dir = os.path.join(tempfile.gettempdir(), "mnist_data_")
    LightningMNISTClassifier.download_data(data_dir)

    # layer sizes are fixed at sampling time; only lr and batch_size mutate
    config = {
        "layer_1_size": tune.choice([32, 64, 128]),
        "layer_2_size": tune.choice([64, 128, 256]),
        "lr": 1e-3,
        "batch_size": 64,
    }

    scheduler = PopulationBasedTraining(
        perturbation_interval=4,
        hyperparam_mutations={
            "lr": tune.loguniform(1e-4, 1e-1),
            "batch_size": [32, 64, 128]
        })

    reporter = CLIReporter(
        parameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],
        metric_columns=["loss", "mean_accuracy", "training_iteration"])

    analysis = tune.run(
        tune.with_parameters(
            train_mnist_tune_checkpoint,
            data_dir=data_dir,
            num_epochs=num_epochs,
            num_gpus=gpus_per_trial),
        resources_per_trial={
            "cpu": 1,
            "gpu": gpus_per_trial
        },
        metric="loss",
        mode="min",
        config=config,
        num_samples=num_samples,
        scheduler=scheduler,
        progress_reporter=reporter,
        name="tune_mnist_pbt")

    print("Best hyperparameters found were: ", analysis.best_config)

    shutil.rmtree(data_dir)
# __tune_pbt_end__


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()

    if args.smoke_test:
        tune_mnist_asha(num_samples=1, num_epochs=6, gpus_per_trial=0)
        tune_mnist_pbt(num_samples=1, num_epochs=6, gpus_per_trial=0)
    else:
        # ASHA scheduler
        tune_mnist_asha(num_samples=10, num_epochs=10, gpus_per_trial=0)
        # Population based training
        tune_mnist_pbt(num_samples=10, num_epochs=10, gpus_per_trial=0)
"""Distutils script for cx_Logging. Windows platforms: python setup.py build --compiler=mingw32 install Unix platforms python setup.py build install """ import distutils.command.bdist_rpm import distutils.command.build_ext import distutils.command.install_data import distutils.util import os import sys from distutils.core import setup from distutils.extension import Extension from distutils import sysconfig BUILD_VERSION = "2.1" # define class to ensure the file name given includes the Python version class bdist_rpm(distutils.command.bdist_rpm.bdist_rpm): def run(self): distutils.command.bdist_rpm.bdist_rpm.run(self) specFile = os.path.join(self.rpm_base, "SPECS", "%s.spec" % self.distribution.get_name()) queryFormat = "%{name}-%{version}-%{release}.%{arch}.rpm" command = "rpm -q --qf '%s' --specfile %s" % (queryFormat, specFile) origFileName = os.popen(command).read() parts = origFileName.split("-") parts.insert(2, "py%s%s" % sys.version_info[:2]) newFileName = "-".join(parts) self.move_file(os.path.join("dist", origFileName), os.path.join("dist", newFileName)) # define class to ensure that linking against the library works for normal # C programs while maintaining the name that Python expects class build_ext(distutils.command.build_ext.build_ext): import distutils import os import sys if sys.platform == "win32": user_options = distutils.command.build_ext.build_ext.user_options + [ ('build-implib=', None, 'directory for import library') ] def build_extension(self, ext): import distutils.command.build_ext import os import sys extraLinkArgs = ext.extra_link_args = [] if sys.platform == "win32": self.mkpath(self.build_implib) if self.compiler.compiler_type == "msvc": self.importLibraryName = os.path.join(self.build_implib, "%s.lib" % ext.name) extraLinkArgs.append("/IMPLIB:%s" % self.importLibraryName) else: self.importLibraryName = os.path.join(self.build_implib, "lib%s.a" % ext.name) extraLinkArgs.append("-Wl,--add-stdcall-alias") 
extraLinkArgs.append("-Wl,--enable-stdcall-fixup") extraLinkArgs.append("-Wl,--out-implib=%s" % \ self.importLibraryName) ext.libraries = ["ole32"] else: fileName = self.get_ext_filename(ext.name) if sys.platform.startswith("aix"): extraLinkArgs.append("-Wl,-so%s" % fileName) else: extraLinkArgs.append("-Wl,-soname,%s" % fileName) distutils.command.build_ext.build_ext.build_extension(self, ext) def finalize_options(self): import distutils.command.build_ext distutils.command.build_ext.build_ext.finalize_options(self) import os import sys if sys.platform == "win32" and self.build_implib is None: dir = "implib.%s-%s" % \ (distutils.util.get_platform(), sys.version[:3]) self.build_implib = os.path.join("build", dir) def initialize_options(self): import distutils.command.build_ext distutils.command.build_ext.build_ext.initialize_options(self) import sys if sys.platform == "win32": self.build_implib = None # define class to ensure that the import library (Windows) is installed # properly; this is not relevant on other platforms class install_data(distutils.command.install_data.install_data): def run(self): distutils.command.install_data.install_data.run(self) if sys.platform == "win32": command = self.get_finalized_command("build_ext") dir = os.path.join(self.install_dir, "Libs") self.mkpath(dir) baseName = os.path.basename(command.importLibraryName) targetFileName = os.path.join(dir, baseName) self.copy_file(command.importLibraryName, targetFileName) self.outfiles.append(targetFileName) # setup macros defineMacros = [ ("CX_LOGGING_CORE", None), ("BUILD_VERSION", BUILD_VERSION) ] # define the list of files to be included as documentation for Windows dataFiles = None if sys.platform in ("win32", "cygwin"): baseName = "cx_Logging-doc" dataFiles = [(baseName, [ "LICENSE.TXT", "README.TXT", "HISTORY.txt"])] for dir in ("html", "html/_static", "test"): files = [] fullDirName = "%s/%s" % (baseName, dir) for name in os.listdir(dir): if name.startswith("."): continue fullName = 
"%s/%s" % (dir, name) if os.path.isdir(fullName): continue files.append(fullName) dataFiles.append((fullDirName, files)) docFiles = "LICENSE.txt HISTORY.txt README.txt html test" options = dict(bdist_rpm = dict(doc_files = docFiles)) # define the classifiers for the project classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Python Software Foundation License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: C", "Programming Language :: Python", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Utilities" ] exportSymbols = [ "StartLogging", "StartLoggingEx", "StartLoggingForPythonThread", "StartLoggingForPythonThreadEx", "StartLoggingStderr", "StartLoggingStderrEx", "StartLoggingStdout", "StartLoggingStdoutEx", "StartLoggingFromEnvironment", "StopLogging", "StopLoggingForPythonThread", "LogMessage", "LogMessageV", "LogMessageVaList", "LogMessageForPythonV", "WriteMessageForPython", "LogDebug", "LogInfo", "LogWarning", "LogError", "LogCritical", "LogTrace", "GetLoggingLevel", "SetLoggingLevel", "LogPythonObject", "LogPythonException", "LogPythonExceptionWithTraceback", "LogConfiguredException", "GetLoggingState", "SetLoggingState", "IsLoggingStarted", "IsLoggingAtLevelForPython" ] if sys.platform == "win32": exportSymbols.extend([ "LogWin32Error", "LogGUID", "StartLoggingW", "StartLoggingExW", "LogMessageW", "LogDebugW", "LogInfoW", "LogWarningW", "LogErrorW", "LogCriticalW", "LogTraceW" ]) # setup the extension extension = Extension( name = "cx_Logging", define_macros = defineMacros, export_symbols = exportSymbols, sources = ["cx_Logging.c"], depends = ["cx_Logging.h"]) # perform the setup setup( name = "cx_Logging", cmdclass = dict(build_ext = build_ext, install_data = install_data, bdist_rpm = bdist_rpm), version = BUILD_VERSION, description = "Python and C interfaces for logging", data_files = dataFiles, long_description 
= "Python and C interfaces for logging", author = "Anthony Tuininga", author_email = "anthony.tuininga@gmail.com", maintainer = "Anthony Tuininga", maintainer_email = "anthony.tuininga@gmail.com", url = "http://cx-logging.sourceforge.net", keywords = "logging", classifiers = classifiers, license = "Python Software Foundation License", ext_modules = [extension], options = options)
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): this module is Python 2 only — it relies on
# contextlib.nested (removed in Python 3) and .next() iterator protocol.

import contextlib
import unittest

from makani.lib.python.embedded import test_fixture
from makani.lib.python.embedded import test_runner
import mock
import numpy


class _FakeChildProcess(object):
  """Simulate pexpect child process."""

  def __init__(self, expect_returns, exit_status=0):
    # each call to expect() pops the next value from expect_returns
    self.mock_expect = mock.Mock(side_effect=expect_returns)
    self.exitstatus = exit_status

  def expect(self, *args, **kwargs):  # pylint: disable=unused-argument, invalid-name
    return self.mock_expect()

  def close(self, *args, **kwargs):  # pylint: disable=unused-argument, invalid-name
    pass

  def isalive(self):
    return False


class _FakeChildSpawner(object):
  """Factory yielding _FakeChildProcess instances for pexpect.spawn."""

  def __init__(self, expect_returns, exit_status=0):
    self._expect_returns = expect_returns
    self._exit_status = exit_status
    self._count = 0

  def next(self):  # pylint: disable=invalid-name
    # A list exit_status cycles per spawned child; a scalar applies to all.
    if isinstance(self._exit_status, list):
      exit_status = self._exit_status[self._count % len(self._exit_status)]
    else:
      exit_status = self._exit_status
    self._count += 1
    return _FakeChildProcess(self._expect_returns, exit_status)


class TestCommandWorker(unittest.TestCase):
  """Test CommandWorker class."""

  def setUp(self):
    self._command_args = {
        'log_extra': {'TestId': 0, 'TestNodeId': 0, 'TargetId': 0},
        'command': 'echo',
        'timeout': 10,
    }

  def runTest(self, expect_returns, exit_status, clock):
    """Drive a CommandWorker against a fake child and check bookkeeping."""
    return_value = _FakeChildProcess(expect_returns, exit_status)
    with contextlib.nested(
        mock.patch.object(test_runner.time, 'time', side_effect=clock),
        mock.patch.object(test_runner.pexpect, 'spawn',
                          return_value=return_value)) as (_, mock_spawn):
      worker = test_runner.CommandWorker(**self._command_args)
      final_count = 0
      for count in range(len(expect_returns) + 1):
        if not worker.Iterate():
          final_count = count
          break
      self.assertEqual(final_count, len(expect_returns) - 1)
      self.assertEqual(worker.Close(), exit_status == 0)
      self.assertEqual(worker._child.mock_expect.call_count,
                       len(expect_returns))
      mock_spawn.assert_called_once_with(command=self._command_args['command'],
                                         logfile=worker._logfile,
                                         timeout=self._command_args['timeout'])

  def testNormal(self):
    self.runTest(expect_returns=[0, 0, 0, 1], exit_status=0, clock=[0.0] * 10)

  def testReturnFailure(self):
    self.runTest(expect_returns=[0, 0, 0, 1], exit_status=1, clock=[0.0] * 10)

  def testTimeout(self):
    # advance the fake clock 1s per call so the worker's timeout fires
    clock = numpy.arange(0.0, self._command_args['timeout'] * 10.0, 1.0)
    self.runTest(expect_returns=[0] * self._command_args['timeout'],
                 exit_status=1, clock=clock)


class TestJobWorker(unittest.TestCase):
  """Test JobWorker aggregation of multiple CommandWorkers."""

  def setUp(self):
    self._command_args = {
        'log_extra': {'TestId': 0, 'TestNodeId': 0, 'TargetId': 0},
        'command': 'echo',
        'timeout': 10,
    }

  def testNormal(self):
    w1 = test_runner.CommandWorker(**self._command_args)
    w2 = test_runner.CommandWorker(**self._command_args)
    w1_iterate = [True, True, True, False]
    w2_iterate = [True, True, True, True, True, False]
    with contextlib.nested(
        mock.patch.object(test_runner.time, 'time', return_value=0),
        mock.patch.object(w1, 'Iterate', side_effect=w1_iterate),
        mock.patch.object(w1, 'Close', return_value=True),
        mock.patch.object(w2, 'Iterate', side_effect=w2_iterate),
        mock.patch.object(w2, 'Close', return_value=True)):
      worker = test_runner.JobWorker()
      worker.jobs.append(w1)
      worker.jobs.append(w2)
      final_count = 0
      for count in range(len(w1_iterate) + len(w2_iterate) + 1):
        if not worker.Iterate():
          final_count = count
          break
      # the job finishes only after both sub-workers report done
      self.assertEqual(final_count, len(w1_iterate) + len(w2_iterate) - 1)
      self.assertTrue(worker.Close())

  def testOneJobFailed(self):
    w1 = test_runner.CommandWorker(**self._command_args)
    w2 = test_runner.CommandWorker(**self._command_args)
    w1_iterate = [True, True, True, False]
    w2_iterate = [True, True, True, True, True, False]
    with contextlib.nested(
        mock.patch.object(test_runner.time, 'time', return_value=0),
        mock.patch.object(w1, 'Iterate', side_effect=w1_iterate),
        mock.patch.object(w1, 'Close', return_value=False),
        mock.patch.object(w2, 'Iterate', side_effect=w2_iterate),
        mock.patch.object(w2, 'Close', return_value=True)):
      worker = test_runner.JobWorker()
      worker.jobs.append(w1)
      worker.jobs.append(w2)
      final_count = 0
      for count in range(len(w1_iterate) + len(w2_iterate) + 1):
        if not worker.Iterate():
          final_count = count
          break
      # a failing sub-worker terminates the job at its own iteration count
      self.assertEqual(final_count, len(w1_iterate) - 1)
      self.assertFalse(worker.Close())


class TestNodeWorker(unittest.TestCase):
  """Test NodeWorker against fake spawned children."""

  def setUp(self):
    self._paths = {
        'tms570_bin': '/path/to/tms570-bin',
        'q7_bin': '/path/to/q7-bin',
        'host_bin': '/path/to/host-bin'}
    self._config = {
        'FirmwareTms570': 'tms570_application.elf',
        'FirmwareQ7': 'q7_application',
        'CalibParams': 'calib_params.bin',
        'ConfigParams': 'config_params.bin',
        'HostApplication': None,
        'HostArguments': '',
        'Timeout': 10,
        'TestId': 0,
        'TestNodeId': 0,
        'TargetId': 0}
    self._target = {
        'AioNodeTms570': 'kAioNodeFcA',
        'AioNodeQ7': 'kAioNodeControllerA'}

  def testNormal(self):
    spawner = _FakeChildSpawner([0, 0, 0, 1])
    with contextlib.nested(
        mock.patch.object(test_runner.time, 'time', return_value=0),
        mock.patch.object(test_runner.pexpect, 'spawn', side_effect=spawner)):
      worker = test_runner.NodeWorker(self._config, self._target, self._paths)
      final_count = 0
      for count in range(100):
        if not worker.Iterate():
          final_count = count
          break
      self.assertGreater(final_count, 0)
      self.assertTrue(worker.Close())

  def testFailure(self):
    # alternate child exit statuses so one of the spawned commands fails
    spawner = _FakeChildSpawner([0, 0, 0, 1], [0, 0, 1])
    with contextlib.nested(
        mock.patch.object(test_runner.time, 'time', return_value=0),
        mock.patch.object(test_runner.pexpect, 'spawn', side_effect=spawner)):
      worker = test_runner.NodeWorker(self._config, self._target, self._paths)
      final_count = 0
      for count in range(100):
        if not worker.Iterate():
          final_count = count
          break
      self.assertGreater(final_count, 0)
      self.assertFalse(worker.Close())


class TestTestRunner(unittest.TestCase):
  """End-to-end TestRunner tests over an in-memory fixture database."""

  def setUp(self):
    # Define test fixture hardware.
    self._fixture = test_fixture.TestFixture(database=':memory:')
    self._fixture._relay = mock.Mock(test_fixture.RelayManager)
    # Insert RelayModule.
    relay_port = 1
    relay_config = {
        'Device': '192.168.1.200',
        'Channels': 20,
        'Type': 'MockedRelay'}
    relay_module_id = self._fixture._db.Insert('RelayModules', relay_config)
    # Insert Targets.
    self._fixture._db.InsertTarget({
        'AioNodeTms570': 'kAioNodeFcA',
        'AioNodeQ7': 'kAioNodeControllerA',
        'BoardName': 'fc',
        'HardwareOptions': 'imu novatel',
        'RelayModuleId': relay_module_id,
        'RelayPort': relay_port,
    })
    relay_port += 1
    self._fixture._db.InsertTarget({
        'AioNodeTms570': 'kAioNodeFcB',
        'AioNodeQ7': 'kAioNodeControllerB',
        'BoardName': 'fc',
        'HardwareOptions': 'imu septentrio',
        'RelayModuleId': relay_module_id,
        'RelayPort': relay_port,
    })
    relay_port += 1
    self._fixture._db.InsertTarget({
        'AioNodeTms570': 'kAioNodeFcC',
        'AioNodeQ7': 'kAioNodeControllerC',
        'BoardName': 'fc',
        'HardwareOptions': 'imu hemisphere',
        'RelayModuleId': relay_module_id,
        'RelayPort': relay_port,
    })
    motors = ['Pbi', 'Pbo', 'Pti', 'Pto', 'Sbi', 'Sbo', 'Sti', 'Sto']
    for motor in motors:
      self._fixture._db.InsertTarget({
          'AioNodeTms570': 'kAioNodeMotor' + motor,
          'BoardName': 'motor',
          'RelayModuleId': relay_module_id,
          'RelayPort': relay_port,
      })
      relay_port += 1
    servos = ['A1', 'A2', 'A4', 'A5', 'A7', 'A8', 'E1', 'E2', 'R1', 'R2']
    for servo in servos:
      self._fixture._db.InsertTarget({
          'AioNodeTms570': 'kAioNodeServo' + servo,
          'BoardName': 'aio',
          'CarrierName': 'servo',
          'RelayModuleId': relay_module_id,
          'RelayPort': relay_port,
      })
      relay_port += 1
    # Define test runner.
    paths = {
        'tms570_bin': '/path/to/tms570-bin',
        'q7_bin': '/path/to/q7-bin',
        'host_bin': '/path/to/host-bin'}
    self._runner = test_runner.TestRunner(paths=paths,
                                          database=self._fixture._db)
    # Insert tests into runner.
    test1_node1 = {
        'FirmwareTms570': 'tms570_application.elf',
        'FirmwareQ7': 'q7_application',
        'CalibParams': 'calib_params.bin',
        'ConfigParams': 'config_params.bin',
        'Timeout': 30,
        'HardwareOptions': 'novatel'}
    test1_node2 = {
        'FirmwareTms570': 'tms570_application.elf',
        'FirmwareQ7': 'q7_application',
        'ConfigParams': 'config_params.bin',
        'BoardName': 'fc',
        'Timeout': 30,
        'HardwareOptions': 'septentrio'}
    test2_node1 = {
        'FirmwareTms570': 'tms570_application.elf',
        'CalibParams': 'calib_params.bin',
        'Timeout': 30,
        'HardwareOptions': 'imu'}
    self._runner.InsertTest([test1_node1, test1_node2])
    self._runner.InsertTest([test2_node1])

  def runTest(self, spawner):
    """Iterate fixture+runner until the runner reports done; return count."""
    with contextlib.nested(
        mock.patch.object(test_runner.time, 'time', return_value=0),
        mock.patch.object(test_runner.pexpect, 'spawn', side_effect=spawner)):
      final_count = 0
      for count in range(100):
        self._fixture.Iterate()
        if not self._runner.Iterate():
          final_count = count
          break
      return final_count

  def testNormal(self):
    final_count = self.runTest(_FakeChildSpawner([0, 0, 0, 1]))
    self.assertGreater(final_count, 0)
    self.assertFalse(self._runner.GetError())

  def testFailure(self):
    final_count = self.runTest(_FakeChildSpawner([0, 0, 0, 1], [0, 0, 0, 1]))
    self.assertGreater(final_count, 0)
    self.assertTrue(self._runner.GetError())

  def testInvalidHardware(self):
    # a HardwareOptions value no target satisfies must surface as an error
    test3_node1 = {
        'FirmwareTms570': 'tms570_application.elf',
        'Timeout': 30,
        'HardwareOptions': 'invalid'}
    self._runner.InsertTest([test3_node1])
    final_count = self.runTest(_FakeChildSpawner([0, 0, 0, 1]))
    self.assertGreater(final_count, 0)
    self.assertTrue(self._runner.GetError())

  def testHostApplication(self):
    # ${...} placeholders are substituted by the runner before spawning
    test3_node1 = {
        'HostApplication': 'avionics/linux/test_command',
        'HostArguments': '--node=${AIO_NODE_TMS570} --timeout=${TIMEOUT}',
        'Timeout': 30}
    self._runner.InsertTest([test3_node1])
    final_count = self.runTest(_FakeChildSpawner([0, 0, 0, 1]))
    self.assertGreater(final_count, 0)
    self.assertFalse(self._runner.GetError())


if __name__ == '__main__':
  unittest.main()
import re

from moto.dynamodb2.exceptions import (
    InvalidTokenException,
    InvalidExpressionAttributeNameKey,
)


class Token(object):
    """A single lexical token of a DynamoDB expression.

    ``type`` is either one of the SPECIAL_CHARACTERS (the literal character)
    or one of the integer placeholder constants below; ``value`` is the
    original text of the token.
    """

    _TOKEN_INSTANCE = None
    MINUS_SIGN = "-"
    PLUS_SIGN = "+"
    SPACE_SIGN = " "
    EQUAL_SIGN = "="
    OPEN_ROUND_BRACKET = "("
    CLOSE_ROUND_BRACKET = ")"
    COMMA = ","
    SPACE = " "
    DOT = "."
    OPEN_SQUARE_BRACKET = "["
    CLOSE_SQUARE_BRACKET = "]"
    SPECIAL_CHARACTERS = [
        MINUS_SIGN,
        PLUS_SIGN,
        SPACE_SIGN,
        EQUAL_SIGN,
        OPEN_ROUND_BRACKET,
        CLOSE_ROUND_BRACKET,
        COMMA,
        SPACE,
        DOT,
        OPEN_SQUARE_BRACKET,
        CLOSE_SQUARE_BRACKET,
    ]

    # Attribute: an identifier that is an attribute
    ATTRIBUTE = 0
    # Place holder for attribute name
    ATTRIBUTE_NAME = 1
    # Placeholder for attribute value starts with :
    ATTRIBUTE_VALUE = 2
    # WhiteSpace shall be grouped together
    WHITESPACE = 3
    # Placeholder for a number
    NUMBER = 4

    PLACEHOLDER_NAMES = {
        ATTRIBUTE: "Attribute",
        ATTRIBUTE_NAME: "AttributeName",
        ATTRIBUTE_VALUE: "AttributeValue",
        WHITESPACE: "Whitespace",
        NUMBER: "Number",
    }

    def __init__(self, token_type, value):
        assert (
            token_type in self.SPECIAL_CHARACTERS
            or token_type in self.PLACEHOLDER_NAMES
        )
        self.type = token_type
        self.value = value

    def __repr__(self):
        # Integer types are the placeholder constants; render their names.
        if isinstance(self.type, int):
            return 'Token("{tt}", "{tv}")'.format(
                tt=self.PLACEHOLDER_NAMES[self.type], tv=self.value
            )
        else:
            return 'Token("{tt}", "{tv}")'.format(tt=self.type, tv=self.value)

    def __eq__(self, other):
        return self.type == other.type and self.value == other.value


class ExpressionTokenizer(object):
    """
    Takes a string and returns a list of tokens. While attribute names in
    DynamoDB must be between 1 and 255 characters long there are no other
    restrictions for attribute names. For expressions however there are
    additional rules. If an attribute name does not adhere then it must be
    passed via an ExpressionAttributeName. This tokenizer is aware of the
    rules of Expression attributes.

    We consider a Token as a tuple which has the tokenType

    From https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html
    1) If an attribute name begins with a number or contains a space, a
    special character, or a reserved word, you must use an expression
    attribute name to replace that attribute's name in the expression.
    => So spaces,+,- or other special characters do identify tokens in update
    expressions

    2) When using a dot (.) in an attribute name you must use
    expression-attribute-names. A dot in an expression will be interpreted as
    a separator in a document path

    3) For a nested structure if you want to use expression_attribute_names
    you must specify one per part of the path. Since for members of
    expression_attribute_names the . is part of the name
    """

    @classmethod
    def is_simple_token_character(cls, character):
        """True for characters that may appear inside a staged token."""
        return character.isalnum() or character in ("_", ":", "#")

    @classmethod
    def is_possible_token_boundary(cls, character):
        return (
            character in Token.SPECIAL_CHARACTERS
            or not cls.is_simple_token_character(character)
        )

    @classmethod
    def is_expression_attribute(cls, input_string):
        return re.compile("^[a-zA-Z0-9][a-zA-Z0-9_]*$").match(input_string) is not None

    @classmethod
    def is_expression_attribute_name(cls, input_string):
        """
        https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html
        An expression attribute name must begin with a pound sign (#), and be
        followed by one or more alphanumeric characters.
        """
        return input_string.startswith("#") and cls.is_expression_attribute(
            input_string[1:]
        )

    @classmethod
    def is_expression_attribute_value(cls, input_string):
        return re.compile("^:[a-zA-Z0-9_]*$").match(input_string) is not None

    def raise_unexpected_token(self):
        """If during parsing an unexpected token is encountered"""
        if len(self.token_list) == 0:
            near = ""
        else:
            if len(self.token_list) == 1:
                near = self.token_list[-1].value
            else:
                if self.token_list[-1].type == Token.WHITESPACE:
                    # Last token was whitespace take 2nd last token value as well to help User orientate
                    near = self.token_list[-2].value + self.token_list[-1].value
                else:
                    near = self.token_list[-1].value

        problematic_token = self.staged_characters[0]
        raise InvalidTokenException(problematic_token, near + self.staged_characters)

    def __init__(self, input_expression_str):
        self.input_expression_str = input_expression_str
        self.token_list = []
        self.staged_characters = ""

    @classmethod
    def make_list(cls, input_expression_str):
        assert isinstance(input_expression_str, str)

        return ExpressionTokenizer(input_expression_str)._make_list()

    def add_token(self, token_type, token_value):
        self.token_list.append(Token(token_type, token_value))

    def add_token_from_stage(self, token_type):
        self.add_token(token_type, self.staged_characters)
        self.staged_characters = ""

    @classmethod
    def is_numeric(cls, input_str):
        """Return True only when *input_str* consists entirely of digits.

        BUG FIX: the original used an unanchored ``re.match("[0-9]+")``,
        which accepts any digit *prefix* — a stage such as "123abc" was
        mislabeled as a NUMBER token. An anchored pattern lets such stages
        fall through to the attribute checks in process_staged_characters.
        """
        return re.compile("^[0-9]+$").match(input_str) is not None

    def process_staged_characters(self):
        # Classify the accumulated stage; order matters: names (#...) first,
        # then pure numbers, then attributes, then value placeholders (:...).
        if len(self.staged_characters) == 0:
            return
        if self.staged_characters.startswith("#"):
            if self.is_expression_attribute_name(self.staged_characters):
                self.add_token_from_stage(Token.ATTRIBUTE_NAME)
            else:
                raise InvalidExpressionAttributeNameKey(self.staged_characters)
        elif self.is_numeric(self.staged_characters):
            self.add_token_from_stage(Token.NUMBER)
        elif self.is_expression_attribute(self.staged_characters):
            self.add_token_from_stage(Token.ATTRIBUTE)
        elif self.is_expression_attribute_value(self.staged_characters):
            self.add_token_from_stage(Token.ATTRIBUTE_VALUE)
        else:
            self.raise_unexpected_token()

    def _make_list(self):
        """
        Just go through characters if a character is not a token boundary
        stage it for adding it as a grouped token later if it is a
        tokenboundary process staged characters and then process the token
        boundary as well.
        """
        for character in self.input_expression_str:
            if not self.is_possible_token_boundary(character):
                self.staged_characters += character
            else:
                self.process_staged_characters()

                if character == Token.SPACE:
                    # consecutive whitespace is merged into a single token
                    if (
                        len(self.token_list) > 0
                        and self.token_list[-1].type == Token.WHITESPACE
                    ):
                        self.token_list[-1].value = (
                            self.token_list[-1].value + character
                        )
                    else:
                        self.add_token(Token.WHITESPACE, character)
                elif character in Token.SPECIAL_CHARACTERS:
                    self.add_token(character, character)
                elif not self.is_simple_token_character(character):
                    self.staged_characters += character
                    self.raise_unexpected_token()
                else:
                    raise NotImplementedError(
                        "Encountered character which was not implemented : "
                        + character
                    )
        self.process_staged_characters()
        return self.token_list
#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause

"""This module contains classes representing syntactical elements of SQL."""

import re

from sqlparse import tokens as T
from sqlparse.utils import imt, remove_quotes


class NameAliasMixin:
    """Implements get_real_name and get_alias."""

    def get_real_name(self):
        """Returns the real name (object name) of this identifier."""
        # a.b
        dot_idx, _ = self.token_next_by(m=(T.Punctuation, '.'))
        return self._get_first_name(dot_idx, real_name=True)

    def get_alias(self):
        """Returns the alias for this identifier or ``None``."""

        # "name AS alias"
        kw_idx, kw = self.token_next_by(m=(T.Keyword, 'AS'))
        if kw is not None:
            return self._get_first_name(kw_idx + 1, keywords=True)

        # "name alias" or "complicated column expression alias"
        _, ws = self.token_next_by(t=T.Whitespace)
        if len(self.tokens) > 2 and ws is not None:
            return self._get_first_name(reverse=True)


class Token:
    """Base class for all other classes in this module.

    It represents a single token and has two instance attributes:
    ``value`` is the unchanged value of the token and ``ttype`` is
    the type of the token.
    """

    __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword',
                 'is_group', 'is_whitespace')

    def __init__(self, ttype, value):
        value = str(value)
        self.value = value
        self.ttype = ttype
        self.parent = None
        self.is_group = False
        self.is_keyword = ttype in T.Keyword
        self.is_whitespace = self.ttype in T.Whitespace
        # Keywords compare case-insensitively throughout the module, so the
        # upper-cased form is cached once here.
        self.normalized = value.upper() if self.is_keyword else value

    def __str__(self):
        return self.value

    # Pending tokenlist __len__ bug fix
    # def __len__(self):
    #     return len(self.value)

    def __repr__(self):
        cls = self._get_repr_name()
        value = self._get_repr_value()

        # Use double quotes for the repr when the value itself is
        # single-quoted (e.g. a string literal token).
        q = '"' if value.startswith("'") and value.endswith("'") else "'"
        return "<{cls} {q}{value}{q} at 0x{id:2X}>".format(
            id=id(self), **locals())

    def _get_repr_name(self):
        # Last component of the ttype, e.g. "Keyword" for T.Keyword.DML.
        return str(self.ttype).split('.')[-1]

    def _get_repr_value(self):
        # Truncate long values and collapse whitespace for display.
        raw = str(self)
        if len(raw) > 7:
            raw = raw[:6] + '...'
        return re.sub(r'\s+', ' ', raw)

    def flatten(self):
        """Resolve subgroups."""
        yield self

    def match(self, ttype, values, regex=False):
        """Checks whether the token matches the given arguments.

        *ttype* is a token type. If this token doesn't match the given token
        type.
        *values* is a list of possible values for this token. The values
        are OR'ed together so if only one of the values matches ``True``
        is returned. Except for keyword tokens the comparison is
        case-sensitive. For convenience it's OK to pass in a single string.
        If *regex* is ``True`` (default is ``False``) the given values are
        treated as regular expressions.
        """
        type_matched = self.ttype is ttype
        if not type_matched or values is None:
            return type_matched

        if isinstance(values, str):
            values = (values,)

        if regex:
            # TODO: Add test for regex with is_keyword = false
            # Keywords match case-insensitively; everything else is exact.
            flag = re.IGNORECASE if self.is_keyword else 0
            values = (re.compile(v, flag) for v in values)

            for pattern in values:
                if pattern.search(self.normalized):
                    return True
            return False

        if self.is_keyword:
            values = (v.upper() for v in values)

        return self.normalized in values

    def within(self, group_cls):
        """Returns ``True`` if this token is within *group_cls*.

        Use this method for example to check if an identifier is within
        a function: ``t.within(sql.Function)``.
        """
        # Walk the parent chain up to the root statement.
        parent = self.parent
        while parent:
            if isinstance(parent, group_cls):
                return True
            parent = parent.parent
        return False

    def is_child_of(self, other):
        """Returns ``True`` if this token is a direct child of *other*."""
        return self.parent == other

    def has_ancestor(self, other):
        """Returns ``True`` if *other* is in this tokens ancestry."""
        parent = self.parent
        while parent:
            if parent == other:
                return True
            parent = parent.parent
        return False


class TokenList(Token):
    """A group of tokens.

    It has an additional instance attribute ``tokens`` which holds a
    list of child-tokens.
    """

    __slots__ = 'tokens'

    def __init__(self, tokens=None):
        self.tokens = tokens or []
        # Adopt all children; comprehension used for its side effect only.
        [setattr(token, 'parent', self) for token in self.tokens]
        super().__init__(None, str(self))
        self.is_group = True

    def __str__(self):
        return ''.join(token.value for token in self.flatten())

    # weird bug
    # def __len__(self):
    #     return len(self.tokens)

    def __iter__(self):
        return iter(self.tokens)

    def __getitem__(self, item):
        return self.tokens[item]

    def _get_repr_name(self):
        return type(self).__name__

    def _pprint_tree(self, max_depth=None, depth=0, f=None, _pre=''):
        """Pretty-print the object tree."""
        token_count = len(self.tokens)
        for idx, token in enumerate(self.tokens):
            cls = token._get_repr_name()
            value = token._get_repr_value()

            last = idx == (token_count - 1)
            pre = '`- ' if last else '|- '

            q = '"' if value.startswith("'") and value.endswith("'") else "'"
            print("{_pre}{pre}{idx} {cls} {q}{value}{q}"
                  .format(**locals()), file=f)

            if token.is_group and (max_depth is None or depth < max_depth):
                parent_pre = '   ' if last else '|  '
                token._pprint_tree(max_depth, depth + 1, f, _pre + parent_pre)

    def get_token_at_offset(self, offset):
        """Returns the token that is on position offset."""
        # Offsets are measured in characters of the flattened string value.
        idx = 0
        for token in self.flatten():
            end = idx + len(token.value)
            if idx <= offset < end:
                return token
            idx = end

    def flatten(self):
        """Generator yielding ungrouped tokens.

        This method is recursively called for all child tokens.
        """
        for token in self.tokens:
            if token.is_group:
                yield from token.flatten()
            else:
                yield token

    def get_sublists(self):
        # Yields only direct children that are themselves groups.
        for token in self.tokens:
            if token.is_group:
                yield token

    @property
    def _groupable_tokens(self):
        return self.tokens

    def _token_matching(self, funcs, start=0, end=None, reverse=False):
        """next token that match functions"""
        if start is None:
            return None

        if not isinstance(funcs, (list, tuple)):
            funcs = (funcs,)

        if reverse:
            assert end is None
            # Callers reach the reverse path via token_next(_reverse=True),
            # which has already incremented idx by 1 -- so ``start - 2``
            # begins the scan at the token just before the caller's
            # original index.
            for idx in range(start - 2, -1, -1):
                token = self.tokens[idx]
                for func in funcs:
                    if func(token):
                        return idx, token
        else:
            for idx, token in enumerate(self.tokens[start:end], start=start):
                for func in funcs:
                    if func(token):
                        return idx, token
        return None, None

    def token_first(self, skip_ws=True, skip_cm=False):
        """Returns the first child token.

        If *skip_ws* is ``True`` (the default), whitespace
        tokens are ignored.

        if *skip_cm* is ``True`` (default: ``False``), comments are
        ignored too.
        """
        # this on is inconsistent, using Comment instead of T.Comment...
        def matcher(tk):
            return not ((skip_ws and tk.is_whitespace)
                        or (skip_cm and imt(tk, t=T.Comment, i=Comment)))
        return self._token_matching(matcher)[1]

    def token_next_by(self, i=None, m=None, t=None, idx=-1, end=None):
        # idx defaults to -1 so incrementing starts the search at index 0.
        idx += 1
        return self._token_matching(lambda tk: imt(tk, i, m, t), idx, end)

    def token_not_matching(self, funcs, idx):
        funcs = (funcs,) if not isinstance(funcs, (list, tuple)) else funcs
        # Invert each predicate: find the first token matching none of them.
        funcs = [lambda tk: not func(tk) for func in funcs]
        return self._token_matching(funcs, idx)

    def token_matching(self, funcs, idx):
        return self._token_matching(funcs, idx)[1]

    def token_prev(self, idx, skip_ws=True, skip_cm=False):
        """Returns the previous token relative to *idx*.

        If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
        If *skip_cm* is ``True`` comments are ignored.
        ``None`` is returned if there's no previous token.
        """
        return self.token_next(idx, skip_ws, skip_cm, _reverse=True)

    # TODO: May need to re-add default value to idx
    def token_next(self, idx, skip_ws=True, skip_cm=False, _reverse=False):
        """Returns the next token relative to *idx*.

        If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
        If *skip_cm* is ``True`` comments are ignored.
        ``None`` is returned if there's no next token.
        """
        if idx is None:
            return None, None
        idx += 1  # a lot of calling code pre-compensates for this
        def matcher(tk):
            return not ((skip_ws and tk.is_whitespace)
                        or (skip_cm and imt(tk, t=T.Comment, i=Comment)))
        return self._token_matching(matcher, idx, reverse=_reverse)

    def token_index(self, token, start=0):
        """Return list index of token."""
        # *start* may itself be a token; resolve it to an index first.
        start = start if isinstance(start, int) else self.token_index(start)
        return start + self.tokens[start:].index(token)

    def group_tokens(self, grp_cls, start, end, include_end=True,
                     extend=False):
        """Replace tokens by an instance of *grp_cls*."""
        start_idx = start
        start = self.tokens[start_idx]

        end_idx = end + include_end

        # will be needed later for new group_clauses
        # while skip_ws and tokens and tokens[-1].is_whitespace:
        #     tokens = tokens[:-1]

        if extend and isinstance(start, grp_cls):
            # Merge the new tokens into the existing group rather than
            # nesting a second group of the same class.
            subtokens = self.tokens[start_idx + 1:end_idx]

            grp = start
            grp.tokens.extend(subtokens)
            del self.tokens[start_idx + 1:end_idx]
            grp.value = str(start)
        else:
            subtokens = self.tokens[start_idx:end_idx]
            grp = grp_cls(subtokens)
            self.tokens[start_idx:end_idx] = [grp]
            grp.parent = self

        for token in subtokens:
            token.parent = grp

        return grp

    def insert_before(self, where, token):
        """Inserts *token* before *where*."""
        if not isinstance(where, int):
            where = self.token_index(where)
        token.parent = self
        self.tokens.insert(where, token)

    def insert_after(self, where, token, skip_ws=True):
        """Inserts *token* after *where*."""
        if not isinstance(where, int):
            where = self.token_index(where)
        nidx, next_ = self.token_next(where, skip_ws=skip_ws)
        token.parent = self
        if next_ is None:
            self.tokens.append(token)
        else:
            self.tokens.insert(nidx, token)

    def has_alias(self):
        """Returns ``True`` if an alias is present."""
        return self.get_alias() is not None

    def get_alias(self):
        """Returns the alias for this identifier or ``None``."""
        return None

    def get_name(self):
        """Returns the name of this identifier.

        This is either it's alias or it's real name. The returned valued can
        be considered as the name under which the object corresponding to
        this identifier is known within the current statement.
        """
        return self.get_alias() or self.get_real_name()

    def get_real_name(self):
        """Returns the real name (object name) of this identifier."""
        return None

    def get_parent_name(self):
        """Return name of the parent object if any.

        A parent object is identified by the first occurring dot.
        """
        dot_idx, _ = self.token_next_by(m=(T.Punctuation, '.'))
        _, prev_ = self.token_prev(dot_idx)
        return remove_quotes(prev_.value) if prev_ is not None else None

    def _get_first_name(self, idx=None, reverse=False, keywords=False,
                        real_name=False):
        """Returns the name of the first token with a name"""

        tokens = self.tokens[idx:] if idx else self.tokens
        tokens = reversed(tokens) if reverse else tokens
        types = [T.Name, T.Wildcard, T.String.Symbol]

        if keywords:
            types.append(T.Keyword)

        for token in tokens:
            if token.ttype in types:
                return remove_quotes(token.value)
            elif isinstance(token, (Identifier, Function)):
                return token.get_real_name() if real_name else token.get_name()


class Statement(TokenList):
    """Represents a SQL statement."""

    def get_type(self):
        """Returns the type of a statement.

        The returned value is a string holding an upper-cased reprint of
        the first DML or DDL keyword. If the first token in this group
        isn't a DML or DDL keyword "UNKNOWN" is returned.

        Whitespaces and comments at the beginning of the statement
        are ignored.
        """
        first_token = self.token_first(skip_cm=True)
        if first_token is None:
            # An "empty" statement that either has not tokens at all
            # or only whitespace tokens.
            return 'UNKNOWN'

        elif first_token.ttype in (T.Keyword.DML, T.Keyword.DDL):
            return first_token.normalized

        elif first_token.ttype == T.Keyword.CTE:
            # The WITH keyword should be followed by either an Identifier or
            # an IdentifierList containing the CTE definitions; the actual
            # DML keyword (e.g. SELECT, INSERT) will follow next.
            fidx = self.token_index(first_token)
            tidx, token = self.token_next(fidx, skip_ws=True)
            if isinstance(token, (Identifier, IdentifierList)):
                _, dml_keyword = self.token_next(tidx, skip_ws=True)

                if dml_keyword is not None \
                        and dml_keyword.ttype == T.Keyword.DML:
                    return dml_keyword.normalized

        # Hmm, probably invalid syntax, so return unknown.
        return 'UNKNOWN'


class Identifier(NameAliasMixin, TokenList):
    """Represents an identifier.

    Identifiers may have aliases or typecasts.
    """

    def is_wildcard(self):
        """Return ``True`` if this identifier contains a wildcard."""
        _, token = self.token_next_by(t=T.Wildcard)
        return token is not None

    def get_typecast(self):
        """Returns the typecast or ``None`` of this object as a string."""
        midx, marker = self.token_next_by(m=(T.Punctuation, '::'))
        nidx, next_ = self.token_next(midx, skip_ws=False)
        return next_.value if next_ else None

    def get_ordering(self):
        """Returns the ordering or ``None`` as uppercase string."""
        _, ordering = self.token_next_by(t=T.Keyword.Order)
        return ordering.normalized if ordering else None

    def get_array_indices(self):
        """Returns an iterator of index token lists"""

        for token in self.tokens:
            if isinstance(token, SquareBrackets):
                # Use [1:-1] index to discard the square brackets
                yield token.tokens[1:-1]


class IdentifierList(TokenList):
    """A list of :class:`~sqlparse.sql.Identifier`\'s."""

    def get_identifiers(self):
        """Returns the identifiers.

        Whitespaces and punctuations are not included in this generator.
        """
        for token in self.tokens:
            if not (token.is_whitespace or token.match(T.Punctuation, ',')):
                yield token


class TypedLiteral(TokenList):
    """A typed literal, such as "date '2001-09-28'" or "interval '2 hours'"."""
    M_OPEN = [(T.Name.Builtin, None), (T.Keyword, "TIMESTAMP")]
    M_CLOSE = T.String.Single, None
    M_EXTEND = T.Keyword, ("DAY", "HOUR", "MINUTE", "MONTH", "SECOND", "YEAR")


class Parenthesis(TokenList):
    """Tokens between parenthesis."""
    M_OPEN = T.Punctuation, '('
    M_CLOSE = T.Punctuation, ')'

    @property
    def _groupable_tokens(self):
        # Exclude the opening and closing parenthesis themselves.
        return self.tokens[1:-1]


class SquareBrackets(TokenList):
    """Tokens between square brackets"""
    M_OPEN = T.Punctuation, '['
    M_CLOSE = T.Punctuation, ']'

    @property
    def _groupable_tokens(self):
        # Exclude the opening and closing bracket themselves.
        return self.tokens[1:-1]


class Assignment(TokenList):
    """An assignment like 'var := val;'"""


class If(TokenList):
    """An 'if' clause with possible 'else if' or 'else' parts."""
    M_OPEN = T.Keyword, 'IF'
    M_CLOSE = T.Keyword, 'END IF'


class For(TokenList):
    """A 'FOR' loop."""
    M_OPEN = T.Keyword, ('FOR', 'FOREACH')
    M_CLOSE = T.Keyword, 'END LOOP'


class Comparison(TokenList):
    """A comparison used for example in WHERE clauses."""

    @property
    def left(self):
        return self.tokens[0]

    @property
    def right(self):
        return self.tokens[-1]


class Comment(TokenList):
    """A comment."""

    def is_multiline(self):
        return self.tokens and self.tokens[0].ttype == T.Comment.Multiline


class Where(TokenList):
    """A WHERE clause."""
    M_OPEN = T.Keyword, 'WHERE'
    M_CLOSE = T.Keyword, (
        'ORDER BY', 'GROUP BY', 'LIMIT', 'UNION', 'UNION ALL', 'EXCEPT',
        'HAVING', 'RETURNING', 'INTO')


class Having(TokenList):
    """A HAVING clause."""
    M_OPEN = T.Keyword, 'HAVING'
    M_CLOSE = T.Keyword, ('ORDER BY', 'LIMIT')


class Case(TokenList):
    """A CASE statement with one or more WHEN and possibly an ELSE part."""
    M_OPEN = T.Keyword, 'CASE'
    M_CLOSE = T.Keyword, 'END'

    def get_cases(self, skip_ws=False):
        """Returns a list of 2-tuples (condition, value).

        If an ELSE exists condition is None.
        """
        CONDITION = 1
        VALUE = 2

        ret = []
        mode = CONDITION

        for token in self.tokens:
            # Set mode from the current statement
            if token.match(T.Keyword, 'CASE'):
                continue

            elif skip_ws and token.ttype in T.Whitespace:
                continue

            elif token.match(T.Keyword, 'WHEN'):
                ret.append(([], []))
                mode = CONDITION

            elif token.match(T.Keyword, 'THEN'):
                mode = VALUE

            elif token.match(T.Keyword, 'ELSE'):
                ret.append((None, []))
                mode = VALUE

            elif token.match(T.Keyword, 'END'):
                mode = None

            # First condition without preceding WHEN
            if mode and not ret:
                ret.append(([], []))

            # Append token depending of the current mode
            if mode == CONDITION:
                ret[-1][0].append(token)

            elif mode == VALUE:
                ret[-1][1].append(token)

        # Return cases list
        return ret


class Function(NameAliasMixin, TokenList):
    """A function or procedure call."""

    def get_parameters(self):
        """Return a list of parameters."""
        parenthesis = self.tokens[-1]
        for token in parenthesis.tokens:
            if isinstance(token, IdentifierList):
                return token.get_identifiers()
            elif imt(token, i=(Function, Identifier), t=T.Literal):
                return [token, ]
        return []


class Begin(TokenList):
    """A BEGIN/END block."""
    M_OPEN = T.Keyword, 'BEGIN'
    M_CLOSE = T.Keyword, 'END'


class Operation(TokenList):
    """Grouping of operations"""


class Values(TokenList):
    """Grouping of values"""


class Command(TokenList):
    """Grouping of CLI commands."""
#!/usr/bin/env python
#
# Dan Levin <dlevin@net.t-labs.tu-berlin.de>
# Brandon Heller <brandonh@stanford.edu>

import logging
from random import choice
import sys

import matplotlib.pyplot as plt
import networkx as nx

from resource_allocator import ResourceAllocator

logger = logging.getLogger(__name__)


class Controller(ResourceAllocator):
    """
    Generic controller -- does not implement control logic:
    """
    def __init__(self, sw=None, srv=None, graph=None, name=""):
        """
        sw: list of switch names governed by this controller
        srv: list of servers known by this controller to which requests
        may be dispatched
        graph: A copy of the simulation graph is given to each controller
        instance at the time of simulation initialization
        name: string representation, should be unique in a simulation
        mylinks: a list of links in the self.graph which are goverend by
        this controller, inferred from switches
        active_flows: used to track the (timeout, path) of all active flows
        """
        # BUG FIX: the original signature used mutable default arguments
        # (sw=[], srv=[]) that are shared across every instance constructed
        # with defaults; use None sentinels and fresh lists instead.
        self.switches = sw if sw is not None else []
        self.servers = srv if srv is not None else []
        self.graph = graph
        self.name = name
        self.active_flows = []
        # Inferred from graph
        self.localservers = []
        self.mylinks = []

    def __str__(self):
        return "Controller %s of: %s" % (self.name, str(self.switches))

    def set_name(self, name):
        self.name = name

    def set_graph(self, graph):
        # BUG FIX: the original assigned the graph to self.name, clobbering
        # the controller's name and never updating the graph.
        self.graph = graph

    def get_switches(self):
        return self.switches

    def handle_request(self):
        # BUG FIX: the original message contained the literal text
        # "__name__"; name the unimplemented method explicitly.
        raise NotImplementedError("Controller does not implement "
                                  "handle_request")

    def sync_toward(self, ctrl=None):
        raise NotImplementedError("Controller does not implement "
                                  "sync_toward")


class LinkBalancerCtrl(Controller):
    """
    Control logic for link balancer: Tracks link capacities of associated
    switches, and decides how to map requests such to minimize the maximum
    link utilization over all visible links
    """

    def __init__(self, *args, **kwargs):
        """Reuse __init__ of our superclass"""
        super(LinkBalancerCtrl, self).__init__(*args, **kwargs)

    def learn_local_servers(self):
        """
        Learn the servers of the sim graph that are within my domain
        Requrires that the controller be initialized by the simulation
        """
        assert len(self.mylinks) > 0
        assert len(self.switches) > 0
        assert self.graph is not None

        localservers = []
        for srv in self.servers:
            # list() guards against graph.neighbors returning an iterator
            # (networkx >= 2.0) instead of a list.
            neighbor_sw = list(self.graph.neighbors(srv))
            if len(neighbor_sw) != 1:
                raise NotImplementedError("Single server links only")
            else:
                neighbor_sw = neighbor_sw[0]
            if (neighbor_sw in self.switches):
                localservers.append(srv)

        # remove duplicates
        self.localservers = list(set(localservers))

    def learn_my_links(self):
        """
        Learn the links of a graph that are directly observable by me
        e.g. which are directly connected to my switches
        Optionally, learn my links from a graph that is not my own
        """
        assert (self.graph is not None)
        links = self.graph.edges()
        mylinks = []

        for link in links:
            u, v = link[:2]
            if (v in self.switches or u in self.switches):
                self.graph[u][v]['mylink'] = True
                mylinks.append((u, v))

        # remove duplicates
        self.mylinks = list(set(mylinks))

    def update_my_state(self, simgraph):
        """
        This action is akin to when a controller polls the switchport
        counters of its switches: The controller will update the 'used'
        values each link in the simulation graph which it governs
        """
        for link in self.mylinks:
            u, v = link
            if not (self.graph[u][v]['used'] == simgraph[u][v]['used']):
                self.graph[u][v]['used'] = simgraph[u][v]['used']

    def sync_toward(self, dstctrl, specificedges=None, timestep=None):
        """
        Share the utilization state of links goverend by this controller
        with another controller in a "push" fashion Optionally specify only
        specific links (edges) to share with the other dstctrl

        In the corner case, where a link crosses a domain, its state is
        owned by both controllers and not modified during sync. When two
        controllers share ownership of a link and hold different state for
        it, the controllers can not resolve their different views throgh
        sync. In the simulation, this scenario will never emerge as long as
        controllers learn their link state (learn_my_state) from the
        simulation graph before handling requests.
        """
        if (specificedges):
            mylinks = specificedges
        else:
            mylinks = self.mylinks

        for link in mylinks:
            u, v = link
            # A controller should only accept state updates to links that do
            # not belong to its own domain.
            if not (dstctrl.graph[u][v].get('mylink')):
                dstctrl.graph[u][v]['used'] = self.graph[u][v]['used']
                dstctrl.graph[u][v]['timestamp'] = timestep

        logging.debug("%s syncs toward %s", self.name, dstctrl.name)

    def get_srv_paths(self, sw, graph=None, local=False):
        """
        Return a list of all paths from available servers to the entry
        switch which can respond. We make the assumption here that the path
        list (routing) is known and static

        If local, return only paths to servers within this controller's
        domain
        """
        if graph is None:
            graph = self.graph

        paths = []

        if local:
            avail_srvs = self.localservers
        else:
            avail_srvs = self.servers

        assert graph is not None
        assert len(sw) > 0
        assert len(avail_srvs) > 0

        for server in avail_srvs:
            paths.append(nx.shortest_path(graph, server, sw))

        return paths

    def compute_path_metric(self, sw, path, util, time_now):
        """
        Return a pathmetric rating the utilization of the path pathmetric
        is a real number in [0,1] which is the max (worst) of all
        linkmetrics for all links in the path
        """
        pathmetric = 1
        linkmetrics = []
        # BUG FIX: materialize the pairs -- zip() is an iterator on
        # Python 3, so len(links) below would raise TypeError.
        links = list(zip(path[:-1], path[1:]))
        # calculate available capacity for each link in path
        for link in links:
            u, v = link
            # DESIGN CHOICE: Should we 1) always include extra-domain
            # state, 2) only include extra-domain state when not stale
            # (timestamp), 3) always exclude extra-domain state when
            # calculating the path metric? Here we do (1).
            used = self.graph[u][v]['used'] + util
            capacity = self.graph[u][v]['capacity']
            linkmetric = float(used) / capacity
            # If the controller estimates it would oversubscribe this link
            if linkmetric > 1:
                logging.info("[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]",
                             str(time_now), linkmetric, str(sw))
                break
            else:
                linkmetrics.append(linkmetric)

        # We define pathmetric to be the worst link metric in path
        if len(linkmetrics) > 0:
            pathmetric = max(linkmetrics)

        funname = sys._getframe().f_code.co_name
        logging.debug("[%s] [%s] [%s] [%s]", funname, str(time_now),
                      str(self), str((path, linkmetrics)))
        return (pathmetric, len(links))

    def find_best_path(self, paths, sw, util, duration, time_now):
        """
        Return (bestpath, bestpathmetric) over *paths*, or None when no
        path exists. Ties on metric are broken by shortest path length.
        """
        bestpath = None
        bestpathmetric = None   # [0,1] lower -> better path
        bestpathlen = None      # lower -> better path
        for path in paths:
            pathmetric, pathlen = self.compute_path_metric(sw, path, util,
                                                           time_now)

            # DESIGN CHOICE: We pick the path with the best pathmetric.
            # If multiple path metrics tie, we pick the path with the
            # shortest length
            if (bestpathmetric is None
                    or pathmetric < bestpathmetric
                    or (pathmetric == bestpathmetric
                        and pathlen < bestpathlen)):
                bestpath = path
                bestpathmetric = pathmetric
                bestpathlen = pathlen

        if (bestpath is None):
            return None

        funname = sys._getframe().f_code.co_name
        logging.debug("[%s] [%s] [%s] [%s] [%s] [%s]", funname,
                      str(time_now), str(self), str(bestpath),
                      str(bestpathlen), str(bestpathmetric))
        return (bestpath, bestpathmetric)

    def handle_request(self, sw, util, duration, time_now):
        """
        Given a request that utilizes some bandwidth for a duration, map
        that request to an available path such that max link bandwidth
        util is minimized
        sw: switch at which request arrives
        util: link utilization to be consumed by this flow
        duration: time over which flow consumes resources
        @return the chosen best path as a list of consecutive link pairs
         ((c1,sw1), (sw1,sw2),...,(sw_n, srv_x))
        """
        #logging.debug(str(self.graph.edges(data=True)))

        #1 Get available paths from servers to switch
        paths = self.get_srv_paths(sw, self.graph)

        #2 choose the path which mins the max link utilization for all links
        # along the path
        result = self.find_best_path(paths, sw, util, duration, time_now)

        # BUG FIX: find_best_path returns a bare None (not a 2-tuple) when
        # no path qualifies; the original unconditionally unpacked it.
        if result is not None:
            bestpath, bestpm = result
        else:
            bestpath = None

        if bestpath:
            self.allocate_resources(bestpath, util, time_now, duration)
        else:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning("[%s] No best path found at switch [%s]",
                            str(time_now), str(sw))

        return bestpath


class GreedyLinkBalancerCtrl(LinkBalancerCtrl):
    """
    A Greedy variant of the LinkBalancerCtrl which assigns all flows only
    to servers in its own domain (local) until doing so would require the
    pathmetric to exceed the greedylimit. Only when it is impossible to
    assign a flow to a local server without the pathmetric exceeding the
    greedylimit, is the controller allowed to send it to a server out of
    the domain.

    greedylimit: A value between [0,1]. A greedylimit of 1 means keep all
    flows in our domain until doing so would oversubscribe a link.
    """

    def __init__(self, greedylimit, *args, **kwargs):
        super(GreedyLinkBalancerCtrl, self).__init__(*args, **kwargs)
        self.greedylimit = greedylimit

    def handle_request(self, sw, util, duration, time_now):
        #Find a best path to a server in our domain
        paths = self.get_srv_paths(sw, self.graph, local=True)
        result = self.find_best_path(paths, sw, util, duration, time_now)
        # BUG FIX: guard against find_best_path returning None before
        # unpacking or comparing bestpm against the greedy limit (the
        # original compared a possibly-None bestpm first).
        bestpath, bestpm = result if result is not None else (None, None)

        #If the best path in our domain violates our greedy limit, find a
        # best path to a server outside our domain
        if (bestpath is None or bestpm > self.greedylimit):
            paths = self.get_srv_paths(sw, self.graph)
            result = self.find_best_path(paths, sw, util, duration, time_now)
            bestpath, bestpm = result if result is not None else (None, None)
            #DESIGN CHOICE: If the fallback bestpm has a worse pathmetric
            # than the local best, should we return the local path instead?

        if bestpath:
            self.allocate_resources(bestpath, util, time_now, duration)
        else:
            logging.warning("[%s] No best path found at switch [%s]",
                            str(time_now), str(sw))

        logging.debug(str(bestpath))
        return bestpath


class SeparateStateLinkBalancerCtrl(LinkBalancerCtrl):
    """
    This controller keeps extra-domain link state obtained through sync
    events separate from extra-domain state inferred through tracking its
    contribution to extra-domain contributed load.

    alpha: Scaling factor for redistributing the load across links between
    sync events
    """

    def __init__(self, alpha, *args, **kwargs):
        super(SeparateStateLinkBalancerCtrl, self).__init__(*args, **kwargs)
        self.alpha = alpha

    def sync_toward(self, dstctrl, specificedges=None, timestep=None):
        """
        Share the utilization state of links goverend by this controller
        with another controller in a "push" fashion Optionally specify only
        specific links (edges) to share with the other dstctrl
        """
        if (specificedges):
            mylinks = specificedges
        else:
            mylinks = self.mylinks

        for link in mylinks:
            u, v = link
            # A controller should only accept state updates to links that do
            # not belong to its own domain.
            if not (dstctrl.graph[u][v].get('mylink')):
                dstctrl.graph[u][v]['sync_learned'] = self.graph[u][v]['used']
                dstctrl.graph[u][v]['timestamp'] = timestep

        logging.debug("%s syncs toward %s", self.name, dstctrl.name)

    def compute_path_metric(self, sw, path, util, time_now, local_contrib):
        """
        Return a pathmetric rating the utilization of the path pathmetric
        is a real number in [0,1] which is the max (worst) of all
        linkmetrics for all links in the path
        """
        pathmetric = 1
        linkmetrics = []
        # BUG FIX: materialize the pairs so len(links) works on Python 3.
        links = list(zip(path[:-1], path[1:]))
        # calculate available capacity for each link in path
        for link in links:
            u, v = link
            # Use the last-learned-via-sync value for a link
            if (not local_contrib) and 'sync_learned' in self.graph[u][v]:
                used1 = self.graph[u][v]['sync_learned'] + util
                used2 = self.graph[u][v]['used'] + util
                # ['used'] is a strict lower bound for ['sync_learned']
                if used1 > used2:
                    used = used1
                    logging.debug("CS [%s] using sync_learned value 1 [%f]",
                                  str(self.name), used1)
                else:
                    used = used2
                    logging.debug("CS [%s] using sync_learned value 2 [%f]",
                                  str(self.name), used2)
            else:
                logging.debug("CS [%s] using tracking value", str(self.name))
                used = self.graph[u][v]['used'] + util

            capacity = self.graph[u][v]['capacity']
            linkmetric = float(used) / capacity
            # If the controller estimates it would oversubscribe this link
            if linkmetric > 1:
                logging.info("[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]",
                             str(time_now), linkmetric, str(sw))
                break
            else:
                linkmetrics.append(linkmetric)

        # We define pathmetric to be the worst link metric in path
        if len(linkmetrics) > 0:
            pathmetric = max(linkmetrics)

        funname = sys._getframe().f_code.co_name
        logging.debug("[%s] [%s] [%s] [%s]", funname, str(time_now),
                      str(self), str((path, linkmetrics)))
        return (pathmetric, len(links))

    def calculate_what_to_shift(self, paths, sw):
        """
        Calculate the current ratio of max(sync_learned, my contributed)
        utilization across two paths

        corresponds to figure 1 in drawing
        """
        pathmetrics = {}
        for path in paths:
            metric, length = self.compute_path_metric(sw, path, 0, 0,
                                                      local_contrib=False)
            assert metric >= 0
            # NOTE(review): keying by metric collapses paths with identical
            # metrics -- confirm ties are acceptable for the 2-path case.
            pathmetrics[metric] = path

        metrics = list(pathmetrics.keys())
        logging.debug("SS CWTS PATH METRICS:, %s", str(pathmetrics))
        # BUG FIX: force float division; integer metrics would truncate
        # the balanced metric under Python 2 semantics.
        balanced_metric = float(sum(metrics)) / len(metrics)
        if max(metrics) == 0:
            logging.debug("SS CWTS MAX METRIC is 0")
            shift_by = 0
            shift_from_path = None
        else:
            logging.debug("SS max(metrics) is %s", str(max(metrics)))
            logging.debug("SS balanced metrics is %s", str(balanced_metric))
            shift_by = (max(metrics) - balanced_metric) / max(metrics)
            shift_from_path = pathmetrics[max(metrics)]

        logging.debug("SS CWTS SHIFT FROM: %s", str(shift_from_path))
        logging.debug("SS CWTS SHIFT BY: %s", str(shift_by))
        return (shift_from_path, shift_by)

    def find_best_path(self, paths, sw, util, duration, time_now):
        """
        Calculate the current ratio of my contributed utilization across
        two paths

        corresponds to figure 1 in drawing
        """
        assert len(paths) == 2

        path_to_shift, shift_by = self.calculate_what_to_shift(paths, sw)

        pathmetrics = {}
        paths_by_length = {}
        metrics = []
        metricpaths = {}
        for path in paths:
            metric, length = self.compute_path_metric(sw, path, 0, 0,
                                                      local_contrib=True)
            paths_by_length[length] = path
            metrics.append(metric)
            assert metric >= 0
            pathmetrics[" ".join(path)] = metric
            metricpaths[metric] = path

        logging.debug("SS FBP PATH METRICS:, %s", str(metricpaths))

        if path_to_shift is None:
            # return shortest path
            logging.debug("SS FBP Returning LOCAL: %s",
                          str((paths_by_length[min(paths_by_length.keys())],
                               0)))
            return (paths_by_length[min(paths_by_length.keys())], 0)

        path_to_shift_metric = pathmetrics.pop(" ".join(path_to_shift))
        # BUG FIX: dict.keys() is a view on Python 3 and does not support
        # indexing; next(iter(...)) fetches the single remaining key.
        path_to_receive_metric = pathmetrics.pop(next(iter(pathmetrics)))
        logging.debug("SS FBP Path to Recv: %s",
                      str(metricpaths[path_to_receive_metric]))

        if (path_to_receive_metric == 0):
            logging.debug("SS FBP EARLY Returning : %s",
                          str((metricpaths[min(metrics)], 0)))
            return (metricpaths[min(metrics)], 0)
        else:
            current_ratio = path_to_shift_metric * 1.0 / \
                path_to_receive_metric
            logging.debug("SS FBP CURRENT RATIO: %s", str(current_ratio))

            goal_path_to_shift_metric = path_to_shift_metric * \
                (1 - (shift_by * self.alpha))
            goal_path_to_receive_metric = path_to_receive_metric + \
                (path_to_shift_metric * (shift_by * self.alpha))

            if (goal_path_to_receive_metric == 0):
                # large number for practical purposes
                goal_ratio = 100000
            else:
                goal_ratio = goal_path_to_shift_metric * 1.0 / \
                    goal_path_to_receive_metric

            logging.debug("SS FBP GOAL RATIO: %s", str(goal_ratio))

        # FINALLY DECIDE WHICH PATH TO RETURN BASED ON GOAL-Current RATIO
        if goal_ratio - current_ratio < 0:
            # return path with lower utiliztion
            logging.debug("SS FBP LOWER Returning : %s",
                          str((metricpaths[min(metrics)], 0)))
            return (metricpaths[min(metrics)], 0)
        if goal_ratio - current_ratio > 0:
            # return path with higher utilization
            logging.debug("SS FBP HIGHER Returning : %s",
                          str((metricpaths[max(metrics)], 0)))
            return (metricpaths[max(metrics)], 0)
        if goal_ratio - current_ratio == 0:
            # return shortest path
            logging.debug("SS FBP Returning LOCAL: %s",
                          str((paths_by_length[min(paths_by_length.keys())],
                               0)))
            return (paths_by_length[min(paths_by_length.keys())], 0)


class RandomChoiceCtrl(LinkBalancerCtrl):
    """
    This controller picks a path at random
    """
    def __init__(self, *args, **kwargs):
        super(RandomChoiceCtrl, self).__init__(*args, **kwargs)

    def handle_request(self, sw, util, duration, time_now):
        # Pick any server path uniformly at random
        paths = self.get_srv_paths(sw, self.graph)
        return choice(paths)
import simplejson
from datetime import datetime
import dateutil.parser
from django.utils.http import urlencode
from django.test import TestCase
from django.core.urlresolvers import reverse
from tastypie.resources import Resource
from tastypie import fields

from corehq.apps.groups.models import Group
from corehq.pillows.reportxform import ReportXFormPillow
from couchforms.models import XFormInstance
from casexml.apps.case.models import CommCareCase
from corehq.pillows.xform import XFormPillow
from corehq.pillows.case import CasePillow
from corehq.apps.users.models import CommCareUser, WebUser
from corehq.apps.domain.models import Domain
from corehq.apps.receiverwrapper.models import FormRepeater, CaseRepeater, ShortFormRepeater
from corehq.apps.api.resources import v0_1, v0_4, v0_5
from corehq.apps.api.fields import ToManyDocumentsField, ToOneDocumentField, UseIfRequested, ToManyDictField
from corehq.apps.api import es
from corehq.apps.api.es import ESQuerySet, ESUserError
from django.conf import settings
from custom.hope.models import CC_BIHAR_PREGNANCY


class FakeXFormES(object):
    """
    A mock of XFormES that will return the docs that have been added
    regardless of the query.
    """

    def __init__(self):
        # Docs are returned in insertion order; every query issued is
        # recorded so tests can assert on the generated query bodies.
        self.docs = []
        self.queries = []

    def add_doc(self, id, doc):
        # NOTE(review): the `id` argument is accepted for API compatibility
        # with the real ES client but is intentionally ignored here.
        self.docs.append(doc)

    def run_query(self, query):
        """Record the query and return all added docs in an ES-shaped response."""
        self.queries.append(query)
        # Honor pagination ('from'/'size') so slicing tests behave like ES.
        start = query.get('from', 0)
        end = (query['size'] + start) if 'size' in query else None
        return {
            'hits': {
                'total': len(self.docs),
                'hits': [{'_source': doc} for doc in self.docs[start:end]]
            }
        }


class APIResourceTest(TestCase):
    """
    Base class for shared API tests. Sets up a domain and user and provides
    some helper methods and properties for accessing the API
    """
    resource = None  # must be set by subclasses
    api_name = 'v0.4'  # can be overridden by subclasses

    def setUp(self):
        self.maxDiff = None
        # Every test runs against a fresh 'qwerty' domain with one admin web user.
        self.domain = Domain.get_or_create_with_name('qwerty', is_active=True)
        self.list_endpoint = reverse('api_dispatch_list',
                                     kwargs=dict(domain=self.domain.name,
                                                 api_name=self.api_name,
                                                 resource_name=self.resource.Meta.resource_name))
        self.username = 'rudolph@qwerty.commcarehq.org'
        self.password = '***'
        self.user = WebUser.create(self.domain.name, self.username, self.password)
        self.user.set_role(self.domain.name, 'admin')
        self.user.save()

    def tearDown(self):
        self.user.delete()
        self.domain.delete()

    def single_endpoint(self, id):
        """Return the detail-view URL for the given document id."""
        return reverse('api_dispatch_detail',
                       kwargs=dict(domain=self.domain.name,
                                   api_name=self.api_name,
                                   resource_name=self.resource.Meta.resource_name,
                                   pk=id))


class TestXFormInstanceResource(APIResourceTest):
    """
    Tests the XFormInstanceResource, currently only v0_4

    TODO: Provide tests for each version, especially for those aspects
    which differ between versions. They should call into reusable tests
    for the functionality that is not different.
    """
    resource = v0_4.XFormInstanceResource

    def test_get_list(self):
        """
        Any form in the appropriate domain should be in the list from the API.
        """
        # The actual infrastructure involves saving to CouchDB, having PillowTop
        # read the changes and write it to ElasticSearch.
        # In order to test just the API code, we set up a fake XFormES (this should
        # really be a parameter to the XFormInstanceResource constructor)
        # and write the translated form directly; we are not trying to test
        # the ptop infrastructure.
        #the pillow is set to offline mode - elasticsearch not needed to validate
        pillow = XFormPillow(online=False)
        fake_xform_es = FakeXFormES()
        v0_4.MOCK_XFORM_ES = fake_xform_es

        backend_form = XFormInstance(xmlns = 'fake-xmlns',
                                     domain = self.domain.name,
                                     received_on = datetime.utcnow(),
                                     form = {
                                         '#type': 'fake-type',
                                         '@xmlns': 'fake-xmlns'
                                     })
        backend_form.save()
        # Push the pillow-translated doc straight into the fake ES index.
        translated_doc = pillow.change_transform(backend_form.to_json())
        fake_xform_es.add_doc(translated_doc['_id'], translated_doc)

        self.client.login(username=self.username, password=self.password)

        response = self.client.get(self.list_endpoint)
        self.assertEqual(response.status_code, 200)

        api_forms = simplejson.loads(response.content)['objects']
        self.assertEqual(len(api_forms), 1)

        api_form = api_forms[0]
        self.assertEqual(api_form['form']['@xmlns'], backend_form.xmlns)
        self.assertEqual(api_form['received_on'], backend_form.received_on.isoformat())

        backend_form.delete()

    def test_get_list_xmlns(self):
        """
        Forms can be filtered by passing ?xmlns=<xmlns>

        Since we are not testing ElasticSearch, we only test that the
        proper query is generated.
        """
        fake_xform_es = FakeXFormES()

        # A bit of a hack since none of Python's mocking libraries seem to do basic spies easily...
        prior_run_query = fake_xform_es.run_query

        def mock_run_query(es_query):
            # Assert on the generated ES filter, then delegate to the fake.
            self.assertEqual(
                sorted(es_query['filter']['and']),
                [{'term': {'doc_type': 'xforminstance'}},
                 {'term': {'domain.exact': 'qwerty'}},
                 {'term': {'xmlns.exact': 'foo'}}])
            return prior_run_query(es_query)

        fake_xform_es.run_query = mock_run_query
        v0_4.MOCK_XFORM_ES = fake_xform_es

        self.client.login(username=self.username, password=self.password)

        response = self.client.get('%s?%s' % (self.list_endpoint, urlencode({'xmlns': 'foo'})))
        self.assertEqual(response.status_code, 200)

    def test_get_list_received_on(self):
        """
        Forms can be filtered by passing ?received_on_start=<date>&received_on_end=<date>

        Since we are not testing ElasticSearch, we only test that the
        proper query is generated.
        """
        fake_xform_es = FakeXFormES()

        start_date = datetime(1969, 6, 14)
        end_date = datetime(2011, 1, 2)

        # A bit of a hack since none of Python's mocking libraries seem to do basic spies easily...
        prior_run_query = fake_xform_es.run_query

        def mock_run_query(es_query):
            # Assert that both range bounds and the standard term filters appear.
            self.assertEqual(sorted(es_query['filter']['and']), [
                {'range': {'received_on': {'from': start_date.isoformat()}}},
                {'range': {'received_on': {'to': end_date.isoformat()}}},
                {'term': {'doc_type': 'xforminstance'}},
                {'term': {'domain.exact': 'qwerty'}},
            ])
            return prior_run_query(es_query)

        fake_xform_es.run_query = mock_run_query
        v0_4.MOCK_XFORM_ES = fake_xform_es

        self.client.login(username=self.username, password=self.password)

        response = self.client.get('%s?%s' % (self.list_endpoint, urlencode({
            'received_on_end': end_date.isoformat(),
            'received_on_start': start_date.isoformat(),
        })))
        self.assertEqual(response.status_code, 200)

    def test_get_list_ordering(self):
        '''
        Forms can be ordering ascending or descending on received_on; by default ascending.
        '''
        fake_xform_es = FakeXFormES()

        # A bit of a hack since none of Python's mocking libraries seem to do basic spies easily...
prior_run_query = fake_xform_es.run_query queries = [] def mock_run_query(es_query): queries.append(es_query) return prior_run_query(es_query) fake_xform_es.run_query = mock_run_query v0_4.MOCK_XFORM_ES = fake_xform_es self.client.login(username=self.username, password=self.password) response = self.client.get('%s?order_by=received_on' % self.list_endpoint) # Runs *2* queries self.assertEqual(response.status_code, 200) self.assertEqual(queries[0]['sort'], [{'received_on': 'asc'}]) response = self.client.get('%s?order_by=-received_on' % self.list_endpoint) # Runs *2* queries self.assertEqual(response.status_code, 200) self.assertEqual(queries[2]['sort'], [{'received_on': 'desc'}]) class TestCommCareCaseResource(APIResourceTest): """ Tests the CommCareCaseREsource, currently only v0_4 """ resource = v0_4.CommCareCaseResource def test_get_list(self): """ Any case in the appropriate domain should be in the list from the API. """ # The actual infrastructure involves saving to CouchDB, having PillowTop # read the changes and write it to ElasticSearch. 
#the pillow is set to offline mode - elasticsearch not needed to validate pillow = CasePillow(online=False) fake_case_es = FakeXFormES() v0_4.MOCK_CASE_ES = fake_case_es modify_date = datetime.utcnow() backend_case = CommCareCase(server_modified_on=modify_date, domain=self.domain.name) backend_case.save() translated_doc = pillow.change_transform(backend_case.to_json()) fake_case_es.add_doc(translated_doc['_id'], translated_doc) self.client.login(username=self.username, password=self.password) response = self.client.get(self.list_endpoint) self.assertEqual(response.status_code, 200) api_cases = simplejson.loads(response.content)['objects'] self.assertEqual(len(api_cases), 1) api_case = api_cases[0] self.assertEqual(dateutil.parser.parse(api_case['server_date_modified']), backend_case.server_modified_on) backend_case.delete() class TestHOPECaseResource(APIResourceTest): """ Tests the HOPECaseREsource, currently only v0_4, just to make sure it does not crash right away """ resource = v0_4.HOPECaseResource def test_get_list(self): """ Any case in the appropriate domain should be in the list from the API. """ # The actual infrastructure involves saving to CouchDB, having PillowTop # read the changes and write it to ElasticSearch. 
#the pillow is set to offline mode - elasticsearch not needed to validate pillow = CasePillow(online=False) fake_case_es = FakeXFormES() v0_4.MOCK_CASE_ES = fake_case_es modify_date = datetime.utcnow() backend_case = CommCareCase(server_modified_on=modify_date, domain=self.domain.name) backend_case.type = CC_BIHAR_PREGNANCY backend_case.save() translated_doc = pillow.change_transform(backend_case.to_json()) fake_case_es.add_doc(translated_doc['_id'], translated_doc) self.client.login(username=self.username, password=self.password) response = self.client.get(self.list_endpoint) self.assertEqual(response.status_code, 200) api_cases = simplejson.loads(response.content)['objects'] self.assertEqual(len(api_cases), 2) api_case = api_cases['mother_lists'][0] self.assertEqual(dateutil.parser.parse(api_case['server_date_modified']), backend_case.server_modified_on) backend_case.delete() class TestCommCareUserResource(APIResourceTest): """ Basic sanity checking of v0_1.CommCareUserResource """ resource = v0_5.CommCareUserResource api_name = 'v0.5' def test_get_list(self): self.client.login(username=self.username, password=self.password) commcare_user = CommCareUser.create(domain=self.domain.name, username='fake_user', password='*****') backend_id = commcare_user.get_id response = self.client.get(self.list_endpoint) self.assertEqual(response.status_code, 200) api_users = simplejson.loads(response.content)['objects'] self.assertEqual(len(api_users), 1) self.assertEqual(api_users[0]['id'], backend_id) commcare_user.delete() def test_get_single(self): self.client.login(username=self.username, password=self.password) commcare_user = CommCareUser.create(domain=self.domain.name, username='fake_user', password='*****') backend_id = commcare_user._id response = self.client.get(self.single_endpoint(backend_id)) self.assertEqual(response.status_code, 200) api_user = simplejson.loads(response.content) self.assertEqual(api_user['id'], backend_id) def test_create(self): 
        self.client.login(username=self.username, password=self.password)

        group = Group({"name": "test"})
        group.save()

        self.assertEqual(0, len(CommCareUser.by_domain(self.domain.name)))

        user_json = {
            "username": "jdoe",
            "password": "qwer1234",
            "first_name": "John",
            "last_name": "Doe",
            "email": "jdoe@example.org",
            "language": "en",
            "phone_numbers": [
                "+50253311399",
                "50253314588"
            ],
            "groups": [
                group._id
            ],
            "user_data": {
                "chw_id": "13/43/DFA"
            }
        }
        response = self.client.post(self.list_endpoint,
                                    simplejson.dumps(user_json),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 201)
        # Destructuring assert: exactly one user must now exist in the domain.
        [user_back] = CommCareUser.by_domain(self.domain.name)
        self.assertEqual(user_back.username, "jdoe")
        self.assertEqual(user_back.first_name, "John")
        self.assertEqual(user_back.last_name, "Doe")
        self.assertEqual(user_back.email, "jdoe@example.org")
        self.assertEqual(user_back.language, "en")
        self.assertEqual(user_back.get_group_ids()[0], group._id)
        self.assertEqual(user_back.user_data["chw_id"], "13/43/DFA")
        # The first phone number becomes the default.
        self.assertEqual(user_back.default_phone_number, "+50253311399")
        user_back.delete()
        group.delete()

    def test_update(self):
        self.client.login(username=self.username, password=self.password)
        user = CommCareUser.create(domain=self.domain.name, username="test", password="qwer1234")
        group = Group({"name": "test"})
        group.save()
        user_json = {
            "first_name": "test",
            "last_name": "last",
            "email": "tlast@example.org",
            "language": "pol",
            "phone_numbers": [
                "+50253311399",
                "50253314588"
            ],
            "groups": [
                group._id
            ],
            "user_data": {
                "chw_id": "13/43/DFA"
            }
        }

        backend_id = user._id
        response = self.client.put(self.single_endpoint(backend_id),
                                   simplejson.dumps(user_json),
                                   content_type='application/json')
        self.assertEqual(response.status_code, 202, response.content)
        self.assertEqual(1, len(CommCareUser.by_domain(self.domain.name)))
        modified = CommCareUser.get(backend_id)
        self.assertEqual(modified.username, "test")
        self.assertEqual(modified.first_name, "test")
        self.assertEqual(modified.last_name, "last")
        self.assertEqual(modified.email, "tlast@example.org")
        self.assertEqual(modified.language, "pol")
        self.assertEqual(modified.get_group_ids()[0], group._id)
        self.assertEqual(modified.user_data["chw_id"], "13/43/DFA")
        self.assertEqual(modified.default_phone_number, "+50253311399")
        modified.delete()
        group.delete()


class TestWebUserResource(APIResourceTest):
    """
    Basic sanity checking of v0_5.WebUserResource
    """
    resource = v0_5.WebUserResource
    api_name = 'v0.5'

    def _check_user_data(self, user, json_user):
        # Helper: verify that the serialized user matches the backend user,
        # including role and per-permission flags.
        self.assertEqual(user._id, json_user['id'])
        role = user.get_role(self.domain.name)
        self.assertEqual(role.name, json_user['role'])
        self.assertEqual(user.is_domain_admin(self.domain.name), json_user['is_admin'])
        for perm in ['edit_web_users', 'edit_commcare_users', 'edit_data',
                     'edit_apps', 'view_reports']:
            self.assertEqual(getattr(role.permissions, perm), json_user['permissions'][perm])

    def test_get_list(self):
        self.client.login(username=self.username, password=self.password)

        response = self.client.get(self.list_endpoint)
        self.assertEqual(response.status_code, 200)

        api_users = simplejson.loads(response.content)['objects']
        self.assertEqual(len(api_users), 1)
        self._check_user_data(self.user, api_users[0])

        another_user = WebUser.create(self.domain.name, 'anotherguy', '***')
        another_user.set_role(self.domain.name, 'field-implementer')
        another_user.save()

        response = self.client.get(self.list_endpoint)
        self.assertEqual(response.status_code, 200)
        api_users = simplejson.loads(response.content)['objects']
        self.assertEqual(len(api_users), 2)

        # username filter
        response = self.client.get('%s?username=%s' % (self.list_endpoint, 'anotherguy'))
        self.assertEqual(response.status_code, 200)
        api_users = simplejson.loads(response.content)['objects']
        self.assertEqual(len(api_users), 1)
        self._check_user_data(another_user, api_users[0])

        response = self.client.get('%s?username=%s' % (self.list_endpoint, 'nomatch'))
        self.assertEqual(response.status_code, 200)
        api_users = simplejson.loads(response.content)['objects']
        self.assertEqual(len(api_users), 0)

    def test_get_single(self):
        self.client.login(username=self.username, password=self.password)

        response = self.client.get(self.single_endpoint(self.user._id))
        self.assertEqual(response.status_code, 200)

        api_user = simplejson.loads(response.content)
        self._check_user_data(self.user, api_user)

    def test_create(self):
        self.client.login(username=self.username, password=self.password)

        user_json = {
            "username": "test_1234",
            "password": "qwer1234",
            "email": "admin@example.com",
            "first_name": "Joe",
            "is_admin": True,
            "last_name": "Admin",
            "permissions": {
                "edit_apps": True,
                "edit_commcare_users": True,
                "edit_data": True,
                "edit_web_users": True,
                "view_reports": True
            },
            "phone_numbers": [
            ],
            "role": "admin"
        }
        response = self.client.post(self.list_endpoint,
                                    simplejson.dumps(user_json),
                                    content_type='application/json')
        self.assertEqual(response.status_code, 201)
        user_back = WebUser.get_by_username("test_1234")
        self.assertEqual(user_back.username, "test_1234")
        self.assertEqual(user_back.first_name, "Joe")
        self.assertEqual(user_back.last_name, "Admin")
        self.assertEqual(user_back.email, "admin@example.com")
        user_back.delete()

    def test_update(self):
        self.client.login(username=self.username, password=self.password)
        user = WebUser.create(domain=self.domain.name, username="test", password="qwer1234")
        user_json = {
            "email": "admin@example.com",
            "first_name": "Joe",
            "is_admin": True,
            "last_name": "Admin",
            "permissions": {
                "edit_apps": True,
                "edit_commcare_users": True,
                "edit_data": True,
                "edit_web_users": True,
                "view_reports": True
            },
            "phone_numbers": [
            ],
            "role": "admin"
        }

        backend_id = user._id
        response = self.client.put(self.single_endpoint(backend_id),
                                   simplejson.dumps(user_json),
                                   content_type='application/json')
        self.assertEqual(response.status_code, 202, response.content)
        modified = WebUser.get(backend_id)
        self.assertEqual(modified.username, "test")
        self.assertEqual(modified.first_name, "Joe")
self.assertEqual(modified.last_name, "Admin") self.assertEqual(modified.email, "admin@example.com") modified.delete() class TestRepeaterResource(APIResourceTest): """ Basic sanity checking of v0_4.RepeaterResource """ resource = v0_4.RepeaterResource repeater_types = [FormRepeater, CaseRepeater, ShortFormRepeater] def test_get(self): self.client.login(username=self.username, password=self.password) # Add a repeater of various types and check that it comes back for cls in self.repeater_types: repeater = cls(domain=self.domain.name, url='http://example.com/forwarding/{cls}'.format(cls=cls.__name__)) repeater.save() backend_id = repeater._id response = self.client.get(self.single_endpoint(backend_id)) self.assertEqual(response.status_code, 200) result = simplejson.loads(response.content) self.assertEqual(result['id'], backend_id) self.assertEqual(result['url'], repeater.url) self.assertEqual(result['domain'], repeater.domain) self.assertEqual(result['type'], cls.__name__) repeater.delete() def test_get_list(self): self.client.login(username=self.username, password=self.password) # Add a form repeater and check that it comes back form_repeater = FormRepeater(domain=self.domain.name, url='http://example.com/forwarding/form') form_repeater.save() backend_id = form_repeater._id response = self.client.get(self.list_endpoint) self.assertEqual(response.status_code, 200) api_repeaters = simplejson.loads(response.content)['objects'] self.assertEqual(len(api_repeaters), 1) self.assertEqual(api_repeaters[0]['id'], backend_id) self.assertEqual(api_repeaters[0]['url'], form_repeater.url) self.assertEqual(api_repeaters[0]['domain'], form_repeater.domain) self.assertEqual(api_repeaters[0]['type'], 'FormRepeater') # Add a case repeater and check that both come back case_repeater = CaseRepeater(domain=self.domain.name, url='http://example.com/forwarding/case') case_repeater.save() backend_id = case_repeater._id response = self.client.get(self.list_endpoint) 
self.assertEqual(response.status_code, 200) api_repeaters = simplejson.loads(response.content)['objects'] self.assertEqual(len(api_repeaters), 2) api_case_repeater = filter(lambda r: r['type'] == 'CaseRepeater', api_repeaters)[0] self.assertEqual(api_case_repeater['id'], case_repeater._id) self.assertEqual(api_case_repeater['url'], case_repeater.url) self.assertEqual(api_case_repeater['domain'], case_repeater.domain) form_repeater.delete() case_repeater.delete() def test_create(self): self.client.login(username=self.username, password=self.password) for cls in self.repeater_types: self.assertEqual(0, len(cls.by_domain(self.domain.name))) repeater_json = { "domain": self.domain.name, "type": cls.__name__, "url": "http://example.com/forwarding/{cls}".format(cls=cls.__name__), } response = self.client.post(self.list_endpoint, simplejson.dumps(repeater_json), content_type='application/json') self.assertEqual(response.status_code, 201, response.content) [repeater_back] = cls.by_domain(self.domain.name) self.assertEqual(repeater_json['domain'], repeater_back.domain) self.assertEqual(repeater_json['type'], repeater_back.doc_type) self.assertEqual(repeater_json['url'], repeater_back.url) repeater_back.delete() def test_update(self): self.client.login(username=self.username, password=self.password) for cls in self.repeater_types: repeater = cls(domain=self.domain.name, url='http://example.com/forwarding/{cls}'.format(cls=cls.__name__)) repeater.save() backend_id = repeater._id repeater_json = { "domain": self.domain.name, "type": cls.__name__, "url": "http://example.com/forwarding/modified/{cls}".format(cls=cls.__name__), } response = self.client.put(self.single_endpoint(backend_id), simplejson.dumps(repeater_json), content_type='application/json') self.assertEqual(response.status_code, 204, response.content) self.assertEqual(1, len(cls.by_domain(self.domain.name))) modified = cls.get(backend_id) self.assertTrue('modified' in modified.url) repeater.delete() class 
TestReportPillow(TestCase): def test_xformPillowTransform(self): """ Test to make sure report xform and reportxform pillows strip the appVersion dict to match the mappings """ pillows = [ReportXFormPillow(online=False),XFormPillow(online=False)] bad_appVersion = { "_id": "foo", "domain": settings.ES_XFORM_FULL_INDEX_DOMAINS[0], "form": { "meta": { "@xmlns": "http://openrosa.org/jr/xforms", "username": "someuser", "instanceID": "foo", "userID": "some_user_id", "timeEnd": "2013-09-20T01:33:12Z", "appVersion": { "@xmlns": "http://commcarehq.org/xforms", "#text": "CCODK:\"2.5.1\"(11126). v236 CC2.5b[11126] on April-15-2013" }, "timeStart": "2013-09-19T01:13:20Z", "deviceID": "somedevice" } } } for pillow in pillows: cleaned = pillow.change_transform(bad_appVersion) self.assertFalse(isinstance(cleaned['form']['meta']['appVersion'], dict)) self.assertTrue(isinstance(cleaned['form']['meta']['appVersion'], str)) self.assertTrue(cleaned['form']['meta']['appVersion'], "CCODK:\"2.5.1\"(11126). v236 CC2.5b[11126] on April-15-2013") class TestESQuerySet(TestCase): ''' Tests the ESQuerySet for appropriate slicing, etc ''' def test_slice(self): es = FakeXFormES() for i in xrange(0, 1300): es.add_doc(i, {'i': i}) queryset = ESQuerySet(es_client=es, payload={}) qs_slice = list(queryset[3:7]) self.assertEqual(es.queries[0]['from'], 3) self.assertEqual(es.queries[0]['size'], 4) self.assertEqual(len(qs_slice), 4) queryset = ESQuerySet(es_client=es, payload={}) qs_slice = list(queryset[10:20]) self.assertEqual(es.queries[1]['from'], 10) self.assertEqual(es.queries[1]['size'], 10) self.assertEqual(len(qs_slice), 10) queryset = ESQuerySet(es_client=es, payload={}) qs_slice = list(queryset[500:1000]) self.assertEqual(es.queries[2]['from'], 500) self.assertEqual(es.queries[2]['size'], 500) self.assertEqual(len(qs_slice), 500) def test_order_by(self): es = FakeXFormES() for i in xrange(0, 1300): es.add_doc(i, {'i': i}) queryset = ESQuerySet(es_client=es, payload={}) qs_asc = 
list(queryset.order_by('foo')) self.assertEqual(es.queries[0]['sort'], [{'foo': 'asc'}]) qs_desc = list(queryset.order_by('-foo')) self.assertEqual(es.queries[1]['sort'], [{'foo': 'desc'}]) qs_overwrite = list(queryset.order_by('bizzle').order_by('-baz')) self.assertEqual(es.queries[2]['sort'], [{'baz': 'desc'}]) qs_multi = list(queryset.order_by('one', '-two', 'three')) self.assertEqual(es.queries[3]['sort'], [{'one': 'asc'}, {'two': 'desc'}, {'three': 'asc'}]) class ToManySourceModel(object): def __init__(self, other_model_ids, other_model_dict): self.other_model_dict = other_model_dict self.other_model_ids = other_model_ids @property def other_models(self): return [self.other_model_dict.get(id) for id in self.other_model_ids] class ToManyDestModel(object): def __init__(self, id): self.id = id class ToManySourceResource(Resource): other_model_ids = fields.ListField(attribute='other_model_ids') other_models = ToManyDocumentsField('corehq.apps.api.tests.ToManyDestResource', attribute='other_models') def __init__(self, objs): super(ToManySourceResource, self).__init__() self.objs = objs def obj_get_list(self): return self.objs class Meta: model_class = ToManySourceModel class ToManyDestResource(Resource): id = fields.CharField(attribute='id') class Meta: model_class = ToManyDestModel class TestToManyDocumentsField(TestCase): ''' Basic test that ToMany dehydrated alright ''' def test_requested_use_in(self): dest_objs = { 'foo': ToManyDestModel('foo'), 'bar': ToManyDestModel('bar'), 'baz': ToManyDestModel('baz'), } source_objs = [ ToManySourceModel(other_model_ids=['foo', 'bar'], other_model_dict=dest_objs), ToManySourceModel(other_model_ids=['bar', 'baz'], other_model_dict=dest_objs) ] source_resource = ToManySourceResource(source_objs) bundle = source_resource.build_bundle(obj=source_objs[0]) dehydrated_bundle = source_resource.full_dehydrate(bundle) self.assertTrue('other_models' in dehydrated_bundle.data) self.assertEqual([other['id'] for other in 
dehydrated_bundle.data['other_models']], ['foo', 'bar']) bundle = source_resource.build_bundle(obj=source_objs[1]) dehydrated_bundle = source_resource.full_dehydrate(bundle) self.assertEqual([other['id'] for other in dehydrated_bundle.data['other_models']], ['bar', 'baz']) class ToManyDictSourceModel(object): def __init__(self, other_model_ids, other_model_dict): self.other_model_dict = other_model_dict self.other_model_ids = other_model_ids @property def other_models(self): return dict([(key, self.other_model_dict.get(id)) for key, id in self.other_model_ids.items()]) class ToManyDictDestModel(object): def __init__(self, id): self.id = id class ToManyDictSourceResource(Resource): other_model_ids = fields.ListField(attribute='other_model_ids') other_models = ToManyDictField('corehq.apps.api.tests.ToManyDictDestResource', attribute='other_models') def __init__(self, objs): super(ToManyDictSourceResource, self).__init__() self.objs = objs def obj_get_list(self): return self.objs class Meta: model_class = ToManyDictSourceModel class ToManyDictDestResource(Resource): id = fields.CharField(attribute='id') class Meta: model_class = ToManyDictDestModel class TestToManyDictField(TestCase): ''' Basic test that ToMany dehydrated alright ''' def test_dehydrate(self): dest_objs = { 'foo': ToManyDictDestModel('foo'), 'bar': ToManyDictDestModel('bar'), 'baz': ToManyDictDestModel('baz'), } source_objs = [ ToManyDictSourceModel(other_model_ids={ 'first_other': 'foo', 'second_other': 'bar'}, other_model_dict=dest_objs), ToManyDictSourceModel(other_model_ids={ 'first_other': 'bar', 'second_other': 'baz'}, other_model_dict=dest_objs) ] source_resource = ToManyDictSourceResource(source_objs) bundle = source_resource.build_bundle(obj=source_objs[0]) dehydrated_bundle = source_resource.full_dehydrate(bundle) self.assertTrue('other_models' in dehydrated_bundle.data) self.assertEqual(dehydrated_bundle.data['other_models']['first_other']['id'] , 'foo') 
        self.assertEqual(dehydrated_bundle.data['other_models']['second_other']['id'], 'bar')

        bundle = source_resource.build_bundle(obj=source_objs[1])
        dehydrated_bundle = source_resource.full_dehydrate(bundle)

        self.assertEqual(dehydrated_bundle.data['other_models']['first_other']['id'], 'bar')
        self.assertEqual(dehydrated_bundle.data['other_models']['second_other']['id'], 'baz')


class ToOneSourceModel(object):
    # Fixture model holding a single id that resolves to a ToOneDestModel.

    def __init__(self, other_model_id, other_model_dict):
        self.other_model_dict = other_model_dict
        self.other_model_id = other_model_id

    @property
    def other_model(self):
        return self.other_model_dict.get(self.other_model_id)


class ToOneDestModel(object):

    def __init__(self, id):
        self.id = id


class ToOneSourceResource(Resource):
    other_model_id = fields.ListField(attribute='other_model_id')
    other_model = ToOneDocumentField('corehq.apps.api.tests.ToOneDestResource',
                                     attribute='other_model')

    def __init__(self, objs):
        super(ToOneSourceResource, self).__init__()
        self.objs = objs

    def obj_get_list(self):
        return self.objs

    class Meta:
        model_class = ToOneSourceModel


class ToOneDestResource(Resource):
    id = fields.CharField(attribute='id')

    class Meta:
        model_class = ToOneDestModel


class TestToOneDocumentField(TestCase):
    '''
    Basic test of the <fieldname>__full
    '''

    def test_requested_use_in(self):
        dest_objs = {
            'foo': ToOneDestModel('foo'),
            'bar': ToOneDestModel('bar'),
            'baz': ToOneDestModel('baz'),
        }

        source_objs = [
            ToOneSourceModel(other_model_id='foo', other_model_dict=dest_objs),
            ToOneSourceModel(other_model_id='bar', other_model_dict=dest_objs)
        ]

        source_resource = ToOneSourceResource(source_objs)

        bundle = source_resource.build_bundle(obj=source_objs[0])
        dehydrated_bundle = source_resource.full_dehydrate(bundle)

        self.assertEqual(dehydrated_bundle.data['other_model']['id'], 'foo')

        bundle = source_resource.build_bundle(obj=source_objs[1])
        dehydrated_bundle = source_resource.full_dehydrate(bundle)

        self.assertEqual(dehydrated_bundle.data['other_model']['id'], 'bar')


class UseIfRequestedModel(object):

    def __init__(self, id):
        self.id = id


class UseIfRequestedTestResource(Resource):
    something = UseIfRequested(fields.CharField(attribute='id'))

    def __init__(self, objs):
        super(UseIfRequestedTestResource, self).__init__()
        self.objs = objs

    def obj_get_list(self):
        return self.objs

    class Meta:
        model_class = UseIfRequestedModel


class TestUseIfRequested(TestCase):

    def test_requested_use_in(self):
        objs = [
            UseIfRequestedModel(id='foo'),
            UseIfRequestedModel(id='bar')
        ]

        test_resource = UseIfRequestedTestResource(objs)

        # Without the <fieldname>__full flag, the field is omitted entirely.
        bundle = test_resource.build_bundle(obj=objs[0])
        dehydrated_bundle = test_resource.full_dehydrate(bundle)

        self.assertFalse('id' in dehydrated_bundle.data)

        # With <fieldname>__full=true in the querystring, the field appears.
        bundle = test_resource.build_bundle(obj=objs[0])
        bundle.request.GET['something__full'] = 'true'
        dehydrated_bundle = test_resource.full_dehydrate(bundle)

        self.assertTrue('something' in dehydrated_bundle.data)
        self.assertEqual(dehydrated_bundle.data['something'], 'foo')


class TestSingleSignOnResource(APIResourceTest):
    resource = v0_4.SingleSignOnResource

    def setUp(self):
        super(TestSingleSignOnResource, self).setUp()
        # A CommCare (mobile) user alongside the web user from the base class.
        self.commcare_username = 'webby@qwerty.commcarehq.org'
        self.commcare_password = '*****'
        self.commcare_user = CommCareUser.create(self.domain.name, self.commcare_username, self.commcare_password)

    def tearDown(self):
        self.commcare_user.delete()
        super(TestSingleSignOnResource, self).tearDown()

    def test_web_user_success(self):
        '''
        If correct credentials for a web user are submitted, the response is the profile of that web user
        '''
        response = self.client.post(self.list_endpoint, {'username': self.username, 'password': self.password})
        self.assertEqual(response.status_code, 200)

    def test_commcare_user_success(self):
        '''
        If correct credentials for a commcare user are submitted, the response is the record for that commcare user
        '''
        response = self.client.post(self.list_endpoint, {'username': self.commcare_username, 'password': self.commcare_password})
self.assertEqual(response.status_code, 200) def test_wrong_domain(self): ''' If correct credentials for a user in a different domain are submitted, the response is forbidden ''' wrong_domain = Domain.get_or_create_with_name('dvorak', is_active=True) wrong_list_endpoint = reverse('api_dispatch_list', kwargs=dict(domain=wrong_domain.name, api_name=self.api_name, resource_name=self.resource.Meta.resource_name)) response = self.client.post(wrong_list_endpoint, {'username': self.username, 'password': self.password}) self.assertEqual(response.status_code, 403) wrong_domain.delete() def test_wrong_credentials(self): ''' If incorrect password for the correct username and domain pair are submitted, the response is forbidden ''' response = self.client.post(self.list_endpoint, {'username': self.username, 'password': 'bimbizzleboozle'}) self.assertEqual(response.status_code, 403) def test_no_username(self): ''' If no username supplied, 400 ''' response = self.client.post(self.list_endpoint, {'password': 'bimbizzleboozle'}) self.assertEqual(response.status_code, 400) def test_no_password(self): ''' If no password supplied, 400 ''' response = self.client.post(self.list_endpoint, {'username': self.username}) self.assertEqual(response.status_code, 400) class TestGroupResource(APIResourceTest): resource = v0_5.GroupResource api_name = 'v0.5' def test_get_list(self): self.client.login(username=self.username, password=self.password) group = Group({"name": "test", "domain": self.domain.name}) group.save() backend_id = group.get_id response = self.client.get(self.list_endpoint) self.assertEqual(response.status_code, 200) api_groups = simplejson.loads(response.content)['objects'] self.assertEqual(len(api_groups), 1) self.assertEqual(api_groups[0]['id'], backend_id) group.delete() def test_get_single(self): self.client.login(username=self.username, password=self.password) group = Group({"name": "test", "domain": self.domain.name}) group.save() backend_id = group.get_id response = 
self.client.get(self.single_endpoint(backend_id)) self.assertEqual(response.status_code, 200) api_groups = simplejson.loads(response.content) self.assertEqual(api_groups['id'], backend_id) def test_create(self): self.client.login(username=self.username, password=self.password) self.assertEqual(0, len(Group.by_domain(self.domain.name))) group_json = { "case_sharing": True, "metadata": { "localization": "Ghana" }, "name": "test group", "reporting": True, } response = self.client.post(self.list_endpoint, simplejson.dumps(group_json), content_type='application/json') self.assertEqual(response.status_code, 201) [group_back] = Group.by_domain(self.domain.name) self.assertEqual(group_back.name, "test group") self.assertTrue(group_back.reporting) self.assertTrue(group_back.case_sharing) self.assertEqual(group_back.metadata["localization"], "Ghana") group_back.delete() def test_update(self): self.client.login(username=self.username, password=self.password) group = Group({"name": "test", "domain": self.domain.name}) group.save() group_json = { "case_sharing": True, "metadata": { "localization": "Ghana" }, "name": "test group", "reporting": True, } backend_id = group._id response = self.client.put(self.single_endpoint(backend_id), simplejson.dumps(group_json), content_type='application/json') self.assertEqual(response.status_code, 202, response.content) self.assertEqual(1, len(Group.by_domain(self.domain.name))) modified = Group.get(backend_id) self.assertEqual(modified.name, "test group") self.assertTrue(modified.reporting) self.assertTrue(modified.case_sharing) self.assertEqual(modified.metadata["localization"], "Ghana") modified.delete() class FakeUserES(object): def __init__(self): self.docs = [] self.queries = [] def add_doc(self, doc): self.docs.append(doc) def make_query(self, q=None, fields=None, domain=None, start_at=None, size=None): self.queries.append(q) start = int(start_at) if start_at else 0 end = min(len(self.docs), start + int(size)) if size else None return 
self.docs[start:end] class TestBulkUserAPI(APIResourceTest): resource = v0_5.BulkUserResource api_name = 'v0.5' def setUp(self): self.domain = Domain.get_or_create_with_name('qwerty', is_active=True) self.username = 'rudolph@qwerty.commcarehq.org' self.password = '***' self.admin_user = WebUser.create(self.domain.name, self.username, self.password) self.admin_user.set_role(self.domain.name, 'admin') self.admin_user.save() self.fake_user_es = FakeUserES() v0_5.MOCK_BULK_USER_ES = self.mock_es_wrapper self.make_users() def tearDown(self): self.admin_user.delete() self.domain.delete() v0_5.MOCK_BULK_USER_ES = None def make_users(self): users = [ ('Robb', 'Stark'), ('Jon', 'Snow'), ('Brandon', 'Stark'), ('Eddard', 'Stark'), ('Catelyn', 'Stark'), ('Tyrion', 'Lannister'), ('Tywin', 'Lannister'), ('Jamie', 'Lannister'), ('Cersei', 'Lannister'), ] for first, last in users: username = '_'.join([first.lower(), last.lower()]) email = username + '@qwerty.commcarehq.org' self.fake_user_es.add_doc({ 'id': 'lskdjflskjflaj', 'email': email, 'username': username, 'first_name': first, 'last_name': last, 'phone_numbers': ['9042411080'], }) def mock_es_wrapper(self, *args, **kwargs): return self.fake_user_es.make_query(**kwargs) @property def list_endpoint(self): return reverse( 'api_dispatch_list', kwargs={ 'domain': self.domain.name, 'api_name': self.api_name, 'resource_name': self.resource.Meta.resource_name, } ) def test_excluded_field(self): result = self.query(fields=['email', 'first_name', 'password']) self.assertEqual(result.status_code, 400) def query(self, **params): self.client.login(username=self.username, password=self.password) url = '%s?%s' % (self.list_endpoint, urlencode(params, doseq=True)) return self.client.get(url) def test_paginate(self): limit = 3 result = self.query(limit=limit) self.assertEqual(result.status_code, 200) users = simplejson.loads(result.content)['objects'] self.assertEquals(len(users), limit) result = self.query(start_at=limit, limit=limit) 
self.assertEqual(result.status_code, 200) users = simplejson.loads(result.content)['objects'] self.assertEquals(len(users), limit) def test_basic(self): response = self.query() self.assertEqual(response.status_code, 200)
# This file is a part of OMPC (http://ompc.juricap.com/)

import sys
sys.path += ['../outside/ply']

OCTAVE = False

# TODO
#
#  - make the no ';' print-out an option, the output is ugly
#  - add 1 2, or anything like that, everything after the NAME is considered
#    a string
#  - print -png ... and similar

# M-language keywords recognized by the translator.
_keywords = ["break", "case", "catch", "continue", "else", "elseif", "end",
             "for", "function", "global", "if", "otherwise", "persistent",
             "return", "switch", "try", "while"]

# FIX: "endswicth" was a typo for "endswitch" -- with the typo, Octave's
# endswitch keyword was never mapped to END when OCTAVE is enabled.
_octave_keywords = ["endif", "endwhile", "endfunction", "endswitch", "endfor"]

# functions that are known to not return a value, this will make the
# resulting code prettier
# (FIX: removed the duplicate 'pause' entry; membership tests are unchanged)
_special = ['pause', 'plot', 'hold', 'axis', 'pcolor', 'colorbar',
            'disp', 'colormap', 'set', 'title', 'xlabel', 'ylabel']

# names reserved by the OMPC runtime itself
_ompc_reserved = ['mfunction', 'mcat', 'mcellarray', 'marray', 'mstring']

def isompcreserved(name):
    """Return True if `name` collides with an OMPC runtime identifier."""
    return name in _ompc_reserved

# token-name table for PLY: keyword text -> upper-cased token name
reserved = dict( (x.lower(), x.upper()) for x in _keywords )
if OCTAVE:
    # all Octave end-variants behave like plain END
    reserved.update( dict( (x.lower(), 'END') for x in _octave_keywords ) )

tokens = [
    'NAME', 'NUMBER', 'STRING', 'COMMA', 'SEMICOLON', 'NEWLINE',
    'DOTTIMES', 'DOTDIVIDE', 'DOTPOWER', 'NOTEQUAL', 'ISEQUAL',
    'TRANS', 'CONJTRANS',
    'LESS', 'GREATER', 'LESSEQUAL', 'GREATEREQUAL',
    'AND', 'OR', 'NOT', 'ELOR', 'ELAND',
    'LBRACKET', 'RBRACKET', 'LCURLY', 'RCURLY', 'LPAREN', 'RPAREN',
    'LAMBDA', 'COMMENT',
] + reserved.values()

literals = ['=', '+', '-', '*', '/', '^', ':', "'", '.']

# lexer states: inside comments, [] / {} lists, and () parens the same
# characters tokenize differently (e.g. 'end' as a NUMBER-like index)
states = (
    ('comment', 'exclusive'),
    ('inlist', 'inclusive'),
    ('inparen', 'inclusive'),
)

# def t_comment(t):
#     r'%(.*)'
#     t.type = 'COMMENT'
#     t.value = '%s'%t.value[1:]
#     t.lexer.lineno += 1
#     return t

def t_LPAREN(t):
    r'\('
    t.lexer.push_state('inparen')
    return t

def t_inparen_END(t):
    'end'
    # inside (), 'end' means "last index", so treat it as a value
    t.value = 'end'
    t.type = 'NUMBER'
    return t

def t_inlist_END(t):
    'end'
    # FIXME, this is wrong
    t.value = 'end'
    t.type = 'NUMBER'
    return t

def t_inparen_RPAREN(t):
    r'\)'
    t.lexer.pop_state()
    return t

def t_LBRACKET(t):
    r'\['
t.lexer.push_state('inlist') return t def t_inlist_RBRACKET(t): r'\]' t.lexer.pop_state() return t def t_LCURLY(t): r'\{' t.lexer.push_state('inlist') return t # cannot do this because [a(1,2) b] = min(1:4); #def t_inlist_COMMA(t): # r',' # t.type = 'LISTCOMMA' # return t def t_inlist_RCURLY(t): r'\}' global _inparen t.lexer.pop_state() _inparen = False return t t_COMMA = ',' t_SEMICOLON = r';' # Comments def t_PERCENT(t): r'%' t.lexer.push_state('comment') def t_comment_body(t): r'([^\n]+)' t.type = 'COMMENT' t.lexer.pop_state() return t t_comment_ignore = '.*' def t_comment_error(t): pass # Tokens t_DOTTIMES = r'\.\*' t_DOTDIVIDE = r'\./' t_DOTPOWER = r'\.\^' t_NOTEQUAL = r'~=' t_ISEQUAL = r'==' t_LESS = r'<' t_GREATER = r'>' t_LESSEQUAL = r'<=' t_GREATEREQUAL = r'>=' t_ELAND = r'&' t_ELOR = '\|' t_AND = r'&&' t_OR = '\|\|' t_NOT = '~' def t_NAME(t): r'[a-zA-Z][a-zA-Z0-9_]*' t.type = 'NAME' if t.value in reserved: t.type = reserved.get(t.value) # Check for reserved words else: t.value = _check_name(t.value) return t t_LAMBDA = r'@' t_TRANS = r"\.'" t_CONJTRANS = r"'" def t_STRING(t): r"'((?:''|[^\n'])*)'" pos = t.lexer.lexpos - len(t.value) if pos == 0: return t prec = t.lexer.lexdata[pos-1] if prec == '.': t.value = ".'" t.type = "TRANS" t.lexer.lexpos = pos + 2 elif prec in ' \t[{(=;,\n': # it's a string, translate "''" to t.value = "'%s'"%t.value[1:-1].replace("\\", r"\\") t.value = "'%s'"%t.value[1:-1].replace("''", r"\'") else: t.value = "'" t.type = "CONJTRANS" t.lexer.lexpos = pos + 1 return t def t_NUMBER(t): r'(?:\d+\.\d*|\d*\.\d+|\d+)(?:[e|E]-?\d+|)' try: float(t.value) except ValueError: _print_error("Is this really a float?", t.value) return t def t_COMMENT(t): r'%' global _comment _comment = t.value t.lexer.lineno += 1 #pass # No return value. 
Token discarded t_ignore = " \t" def t_NEWLINE(t): r'\n' pass # semicolon has a different function inside of [] and {} def t_inlist_SEMICOLON(t): r';' t.type = 'COMMA' t.value = 'SEMICOLON' return t #pass def t_error(t): _print_error("Illegal character '%s'" % t.value[0]) t.lexer.skip(1) # Build the lexer import lex lex.lex() # Parsing rules precedence = ( ('left', 'TRANS', 'CONJTRANS'), ('nonassoc', 'LESS', 'GREATER'), ('left', '+', '-'), ('left', '*', '/', 'DOTTIMES', 'DOTDIVIDE'), ('left', '^', 'DOTPOWER'), ('right', 'UMINUS', 'UPLUS'), ) # dictionary of names names = { } _key_stack = [] _switch_stack = [] _tabs = 0 _comment = None TABSHIFT = 4 def _reset(): global _tabs, names, _key_stack, _switch_stack, _comment, TABSHIFT names = { } _key_stack = [] _switch_stack = [] _tabs = 0 _comment = None TABSHIFT = 4 _reset() def _gettabs(): global _tabs, TABSHIFT return ' '*_tabs def _print3000(*args,**kwargs): """Emulation of Py3k's print. """ from sys import stdout sep = kwargs.get('sep',' ') of = kwargs.get('file',stdout) end = kwargs.get('end','\n') of.write(sep.join(map(str,args))) of.write(end) _outfile = None def _print(src): global _outfile ss = src.split('\n') for x in ss[:-1]: _print3000(' '*_tabs + x, sep='', end='\n', file=_outfile) _print3000(' '*_tabs + ss[-1], sep='', end='', file=_outfile) _errors = [] def _print_error(*args, **kwargs): """Error output. This function should be used for output of all errors. 
""" global _errors, _lineno from sys import stderr sep = kwargs.get('sep',' ') of = kwargs.get('file', stderr) end = kwargs.get('end','\n') if file is None: _errors.append(' '.join(args)) else: d = {'sep':sep, 'file':of} _print3000(**d) _print3000(*args, **d) _print3000("The line: %s"%_last_line, **d) #_print3000("On line: %d!"%(_lineno), **d) def _pop_from_key_stack(): global _key_stack if len(_key_stack) < 1: _print_error('An "end" without matching keyword!') _reset() return None return _key_stack.pop() def p_statement_list(p): '''statement_list : statement | statement COMMA | statement SEMICOLON''' p[0] = _print_statement(p[1], len(p) > 2 and p[2] or None, p[0]) _lvalues = [] _knoend = list(_keywords) _knoend.remove('end') def _print_statement(x, send, p0): global _lvalues, _key_stack, _tabs # print '--------------------', x, send, p0 finish = '' if p0 and p0.strip()[-1] not in ':;': finish = '; ' res = x # don't print results of keyword statements and commands, FIXME xs = x.strip() and x.strip().split()[0] dedent = False if not xs: pass elif xs[0] == '@': assert len(_key_stack) == 1 and _key_stack[0] == 'function' _pop_from_key_stack() _tabs = TABSHIFT dedent = True #xs in _special or \ elif xs in _keywords or \ xs[:2] == '__' or xs in ['elif', 'else:']: if xs not in ['end', 'break', 'continue', 'return', 'global']: dedent = True elif send is None or send == ',': # we need to print also the result if _lvalues: for lv in _lvalues: res += '; print %s'%lv _lvalues = [] if dedent: _tabs -= TABSHIFT _print(finish+res) if dedent: _tabs += TABSHIFT return res def p_statement_list2(p): '''statement_list : statement_list statement | statement_list COMMA statement | statement_list SEMICOLON statement''' p[0] = _print_statement('\n'+p[-1], len(p)>3 and p[2] or None, p[0]) def p_statement_expr(p): '''statement : expression''' p[0] = p[1] def p_statement_function(p): '''statement : FUNCTION LBRACKET name_list RBRACKET "=" NAME LPAREN name_list RPAREN | FUNCTION LBRACKET 
name_list RBRACKET "=" NAME | FUNCTION NAME "=" NAME LPAREN name_list RPAREN | FUNCTION NAME "=" NAME | FUNCTION NAME LPAREN name_list RPAREN | FUNCTION NAME''' global _tabs, _key_stack, _func_name argout, fname, argin = None, None, None if '=' in p: if p[2] == '[': argout, fname = p[3], p[6] if '(' in p: argin = p[8] else: argout, fname = p[2], p[4] if '(' in p: argin = p[6] else: fname = p[2] if '(' in p: argin = p[4] # split argin and make all of them equal None # if one of the is varargin, change it to *varargin argin = [ x.strip() for x in argin.split(',') ] last = [] if 'varargin' in argin: if argin[-1] != 'varargin': p_error(p) argin.pop() last = ['*varargin'] argin = ', '.join([ '%s=None'%x for x in argin ] + last) if argout is None: argout = '' p[0] = '@mfunction("%s")\ndef %s(%s):'%(argout, fname, argin) _func_name = fname _key_stack.append('function') _tabs += TABSHIFT def p_expression_lambda_handle(p): '''expression : LAMBDA NAME''' # function handle p[0] = p[1] def p_expression_name_list(p): '''name_list : name_list COMMA NAME''' p[0] = '%s, %s'%(p[1], p[3]) def p_expression_name_list_2(p): '''name_list : NAME''' p[0] = p[1] def p_expression_lambda(p): '''expression : LAMBDA LPAREN name_list RPAREN expression''' p[0] = 'lambda %s: %s'%(p[3], p[5]) # def '''statement : CLASSDEF NAME''' # pass # properties # methods # events def p_statement_for(p): '''statement : FOR NAME "=" expression''' global _tabs, _key_stack p[0] = 'for %s in %s:'%(p[2], p[4]) _key_stack.append('for') #_print(p[0]) _tabs += TABSHIFT def p_statement_while(p): '''statement : WHILE expression''' global _tabs, _key_stack p[0] = 'while %s:'%p[2] _key_stack.append('while') #_print(p[0]) _tabs += TABSHIFT def p_statement_if(p): '''statement : IF expression''' global _key_stack, _tabs p[0] = 'if %s:'%p[2] _key_stack.append('if') #_print(p[0]) _tabs += TABSHIFT def p_statement_elseif(p): '''statement : ELSEIF expression''' global _tabs, _key_stack # FIXME if p is cellarray we should copare 
with in p[0] = 'elif %s:'%p[2] assert _key_stack[-1] == 'if' _tabs -= TABSHIFT #_print(p[0]) _tabs += TABSHIFT def p_statement_else(p): '''statement : ELSE''' global _tabs, _key_stack # FIXME if p is cellarray we should copare with in p[0] = 'else:' assert _key_stack[-1] == 'if' _tabs -= TABSHIFT #_print(p[0]) _tabs += TABSHIFT def p_statement_break(p): """statement : BREAK""" p[0] = 'break' def p_statement_continue(p): """statement : CONTINUE""" p[0] = 'continue' #_print(p[0]) def p_statement_return(p): """statement : RETURN""" p[0] = 'return' def p_statement_switch(p): '''statement : SWITCH expression''' global _tabs, _key_stack, _switch_stack svar = '__switch_%d__'%len(_switch_stack) p[0] = '%s = %s\nif 0:\n%spass'%(svar, p[2], ' '*TABSHIFT) _key_stack.append('switch') _switch_stack.append( svar ) _tabs += TABSHIFT #_print(p[0]) def p_statement_case(p): '''statement : CASE expression''' global _tabs, _key_stack, _switch_stack # FIXME if p is cellarray we should copare with in p[0] = 'elif %s == %s:'%(_switch_stack[-1], p[2]) assert _key_stack[-1] == 'switch' #_tabs -= TABSHIFT #_print(p[0]) #_tabs += TABSHIFT def p_statement_otherwise(p): """statement : OTHERWISE""" global _key_stack p[0] = 'else:' assert _key_stack[-1] == 'switch' #_tabs -= TABSHIFT #_print(p[0]) #_tabs += TABSHIFT def p_statement_global(p): """statement : GLOBAL list_spaces""" p[0] = 'global %s'%p[2] #_print(p[0]) _func_name = None def p_statement_persistent(p): """statement : PERSISTENT list_spaces""" global _func_name # FIXME, store in in a module or thread ??? 
if _func_name is None: _print_error('"persistent" outside of a function block!') p[0] = 'global __persistent__\n' p[0] += "__persistent__['%s'] = '%s'"%(_func_name, p[2]) def p_expression_list_space(p): '''list_spaces : list_spaces NAME''' p[0] = '%s, %s'%(p[1], p[2]) def p_expression_list_space_2(p): '''list_spaces : NAME''' p[0] = p[1] def p_statement_try(p): '''statement : TRY''' global _tabs, _key_stack p[0] = 'try:' _key_stack.append('try') _tabs += TABSHIFT def p_statement_catch(p): '''statement : CATCH''' global _tabs, _key_stack p[0] = 'except:'%(_switch_stack[-1], p[2]) assert _key_stack[-1] == 'try' def p_statement_end(p): 'statement : END' global _tabs, _key_stack, _switch_stack _tabs -= TABSHIFT p[0] = 'end' kw = _pop_from_key_stack() if kw == 'switch': _switch_stack.pop() def _getname(lname): pos = lname.find('(') if pos == -1: pos = lname.find('{') if pos == -1: return lname return lname[:pos] def p_statement_assign(p): '''statement : name_sub "=" expression | name_attr "=" expression | exprmcat "=" expression | NAME "=" expression''' global _lvalues lname = p[1] if lname[0] == '[': # [...] 
ns = [] for x in lname[1:-1].split(','): ln = _getname(x.strip()) names[ln] = p[3] _lvalues += [ln] elif '(' in lname: p[1] = '%s.lvalue'%lname lname = _getname(lname) _lvalues = [lname] names[lname] = '%s'%p[3] else: names[lname] = '%s'%p[3] _lvalues = [lname] p[0] = '%s = %s'%(p[1], p[3]) def p_statement_nogroup(p): """statement : NAME NAME | NAME NUMBER""" # treating cases like "hold on, axis square" p[0] = '%s("%s")'%(p[1], p[2]) def p_expr_list(p): '''exprlist : exprlist COMMA expression''' p[0] = '%s, %s'%(p[1], p[3]) def p_expr_list_2(p): 'exprlist : expression' p[0] = p[1] def p_expr_inlist(p): '''exprinlist : exprinlist COMMA expression | exprinlist SEMICOLON expression | exprinlist NEWLINE expression''' if p[2] in ['SEMICOLON', 'NEWLINE']: p[0] = '%s, OMPCSEMI, %s'%(p[1], p[3]) else: p[0] = '%s, %s'%(p[1], p[3]) def p_expr_inlist2(p): '''exprinlist : exprinlist expression''' p[0] = '%s, %s'%(p[1], p[2]) def p_expr_inlist_token(p): '''exprinlist : exprinlist SEMICOLON | exprinlist COMMA''' p[0] = p[1] def p_statement_empty(p): '''statement : empty''' p[0] = '' def p_expression_inlist_empty(p): "exprinlist : empty" p[0] = p[1] def p_empty(p): "empty : " p[0] = '' _pinlist = False def p_expr_inlist_2(p): '''exprinlist : expression''' global _pinlist _pinlist = True p[0] = p[1] def p_expression_binop(p): '''expression : expression '+' expression | expression '-' expression | expression '*' expression | expression '/' expression | expression '^' expression | expression DOTTIMES expression | expression DOTDIVIDE expression | expression DOTPOWER expression | expression NOTEQUAL expression | expression ISEQUAL expression | expression LESS expression | expression GREATER expression | expression LESSEQUAL expression | expression GREATEREQUAL expression | expression ELAND expression | expression ELOR expression | expression AND expression | expression OR expression''' if p[2] == '+' : p[0] = '%s + %s'%(p[1], p[3]) elif p[2] == '-' : p[0] = '%s - %s'%(p[1], p[3]) 
elif p[2] == '*' : p[0] = '%s * %s'%(p[1], p[3]) elif p[2] == '/' : p[0] = '%s / %s'%(p[1], p[3]) elif p[2] == '^' : p[0] = '%s ** %s'%(p[1], p[3]) elif p[2] == '.*' : p[0] = '%s *elmul* %s'%(p[1], p[3]) elif p[2] == './' : p[0] = '%s /eldiv/ %s'%(p[1], p[3]) elif p[2] == '.^' : p[0] = '%s **elpow** %s'%(p[1], p[3]) # conditional and logical elif p[2] == '~=' : p[0] = '%s != %s'%(p[1], p[3]) elif p[2] == '==' : p[0] = '%s == %s'%(p[1], p[3]) elif p[2] == '<' : p[0] = '%s < %s'%(p[1], p[3]) elif p[2] == '>' : p[0] = '%s > %s'%(p[1], p[3]) elif p[2] == '<=' : p[0] = '%s <= %s'%(p[1], p[3]) elif p[2] == '>=' : p[0] = '%s >= %s'%(p[1], p[3]) elif p[2] == '&' : p[0] = 'logical_and(%s, %s)'%(p[1], p[3]) elif p[2] == '|' : p[0] = 'logical_or(%s, %s)'%(p[1], p[3]) elif p[2] == '&&' : p[0] = '%s and %s'%(p[1], p[3]) elif p[2] == '||' : p[0] = '%s or %s'%(p[1], p[3]) def p_expression_not(p): "expression : NOT expression" p[0] = 'not %s'%p[2] def p_expression_uminus(p): "expression : '-' expression %prec UMINUS" p[0] = '-%s'%p[2] def p_expression_option(p): "cmd_option : '-' NAME" p[0] = '-%s'%p[2] def p_expression_uplus(p): "expression : '+' expression %prec UPLUS" p[0] = p[2] def p_expression_group(p): "expression : LPAREN exprlist RPAREN" p[0] = '(%s)'%p[2] def p_expression_empty_group(p): "expression : NAME LPAREN RPAREN" p[0] = '%s()'%p[1] def p_expr_mcat(p): 'expression : exprmcat' #if p[1] == '[]' p[0] = 'mcat(%s)'%p[1] def p_expression_list(p): """exprmcat : LBRACKET exprinlist RBRACKET""" global _pinlist _pinlist = False p[0] = '[%s]'%p[2] def p_expression_cell(p): "expression : LCURLY exprinlist RCURLY" global _pinlist _pinlist = False p[0] = 'mcellarray([%s])'%p[2] def p_expression_conjtranspose(p): 'expression : expression CONJTRANS' p[0] = '%s.cT'%p[1] def p_expression_transpose(p): 'expression : expression TRANS' p[0] = '%s.T'%p[1] def p_expression_string(p): "expression : STRING" p[0] = "mstring(%s)"%p[1] def p_expression_indexflat(p): "indexflat : LPAREN ':' 
RPAREN" p[0] = '(mslice[:])' def p_expr_flatslice(p): 'expression : ":"' p[0] = 'mslice[:]' def _check_name(name): from keyword import iskeyword if name == 'class': name = 'mclass' elif iskeyword(name) or isompcreserved(name): # FIXME ? maybe not # MATLAB does not allow names starting with '_', so we shuold be safe # prepending an underscore to the name of a variable name = '_' + name return name def p_expression_sub_flat(p): "expression : NAME indexflat" p[0] = '%s%s'%(p[1], p[2]) def p_expression_sub(p): "name_sub : NAME LPAREN exprlist RPAREN" p[0] = '%s(%s)'%(p[1], p[3]) def p_name_attr2(p): """name_attr : name_sub '.' NAME | name_attr '.' NAME | name_attr '.' name_sub | name_sub '.' name_sub""" p[0] = '%s.%s'%(p[1], p[3]) def p_name_attr(p): "name_attr : NAME" p[0] = '%s'%p[1] def p_expression_attr(p): "expression : name_attr" p[0] = p[1] def p_expression_sub2(p): """name_sub : NAME LCURLY exprinlist RCURLY""" p[0] = '%s(%s)'%(p[1], p[3]) def p_expression_items(p): "expression : name_sub" p[0] = '%s'%p[1] def p_expression_slice(p): """slice : expression ':' expression ':' expression | expression ':' expression""" if len(p) == 6: p[0] = '%s:%s:%s'%(p[1],p[3],p[5]) else: p[0] = '%s:%s'%(p[1],p[3]) def p_expression_mslice(p): "expression : slice" p[0] = 'mslice[%s]'%p[1] def p_expression_number(p): "expression : NUMBER" p[0] = p[1] def p_expression_name(p): "expression : NAME" _more = False _lineno = 0 def p_error(p): global _comment, _more, _pinlist, _lineno, _last_line if p: if p.value == 'NEWLINE' and _pinlist: _more = True else: _print_error(_last_line) _print_error("Syntax error at line %d '%s'!" %(_lineno, p.value)) pass else: if _pinlist: _more = True else: _print_error("Syntax error at EOF") import yacc yacc.yacc(debug=1) def translate(data, outfile=sys.stdout): """Entry point to the OMPC translator. This function functions as a preprocessor. There are aspect of M-language that are difficult (cause conflicts) to be solved by a parser. 
It is also much faster to implement some of the syntax by very simple checks. The preprocessor - combines continuations '...' (single line is submitted to the compiler) - removes comments, but makes it possible to add them later - """ global _lineno, _last_line from re import sub, finditer com = '' d = [] _lineno = 1 for x in data.split('\n'): # preprocess, the returned values are strip of whitespace, and # the optional coment is returned s, com = _ompc_preprocess(x) # if s is None a continuation was requested, submit the next line if s is None: continue _last_line = s yacc.myparse(s + '\n', outfile) # FIXME do something about the comments if s.strip(): _print3000(_gettabs()[:-4] + com.strip(), file=outfile) else: _print3000(com, file=outfile) com = '' _lineno += 1 def translate_to_str(data): from StringIO import StringIO out = StringIO() translate(data, out) return out.getvalue() _xbuf = '' def _myparse(x, outfile=sys.stdout): global _more, _xbuf, _outfile, _last_line _outfile = outfile _last_line = _xbuf + x ret = yacc.parse(_xbuf + x) if _more: # this takes care of the newline inside of [] and {}. We don't want # to have the newline as another token _xbuf += x.strip() if not _xbuf.endswith(';'): _xbuf += ';' _more = False else: _xbuf = '' more = False return ret yacc.myparse = _myparse # when searching for comments we make thigs easier by replacing contetns # of all strings with something else than "%" def _mysub(x): "Helper for replacement of strings." f, t = x.span() return 'x'*(t-f) _cont = [] def _ompc_preprocess(x): """OMPC preprocessor. Takes a single line of m-code and returns a tuple of stripped m-code and a comment. Continuation is requested by the 1st returned value set to None. 
""" global _cont, _pinlist from re import sub, findall, finditer # skip empty statements and take care of possible funny endlines # only '\n' is allowed into the parser x = x.replace('\r', '') if not x.strip(): return '', '' # remove comments x2 = sub(r"'((?:''|[^\n'])*)'", _mysub, x) pos = list(finditer(r'\s*%.*', x2)) com = '' if pos: pos = pos[0].start() com = x[pos:].replace('%', '#', 1) x = x[:pos] if not x.strip(): com = com.lstrip() # combine continuations _cont += [ x ] if x.strip().endswith('...'): _cont[-1] = x.strip()[:-3] return None, com # take care of lines like the following # "save a b c d -v7.3" # "hold" # these can be detected, they can not have '{}[]()=', they are simply # NAMEs and NUMBERs behind a name # FIXME should I make another parser just for this? LOC = ''.join(_cont) if not _pinlist: toks = LOC.split() if len(findall(r'[(){}\[\]=]', LOC)) == 0 and \ toks and toks[0] not in _keywords: from re import split names = [ x for x in split('[;,\s]*', LOC.strip()) if x ] # names = LOC.split() LOC = '%s(%s)'%( names[0], ', '.join([ "'%s'"%x for x in names[1:] ]) ) _cont = [] return LOC, com usage = """\ ompcply.py - to get ompc compiler test console ompcply.py lexdebug - to get the console with debug output from tokenizer. ompcply.py file.m - will translate an m-file to OMPC .pym file. The output is always to the standard output. """ __all__ = ['translate', 'yacc', 'lex'] if __name__ == "__main__": import sys, os LEXDEBUG = 0 if len(sys.argv) > 1: if sys.argv[1] == 'lexdebug': LEXDEBUG = 1 else: if not os.path.exists(sys.path[1]): print usage else: translate(open(sys.argv[1], 'U').read()) sys.exit() print "Welcome to OMPC compiler test console!" 
print "(Ctrl+C to exit)" print # the ompc prompt loop, break with Ctrl+C _lineno = 1 while 1: try: s = raw_input('ompc> ') + '\n' except EOFError: break # preprocess, the returned values are strip of whitespace, and # the optional coment is returned s, com = _ompc_preprocess(s) # if s is None a continuation was requested, submit the next line if s is None: continue # if s is empty don't do anything if not s: continue if LEXDEBUG: # Tokenize lex.input(s) while 1: tok = lex.token() if not tok: break # No more input print tok print _errors _errors = [] yacc.myparse(s) print
import argparse
import socket
import sys

import numpy as np
import random as random
from random import sample
import copy as cp

from HandEvaluator import HandEvaluator
from Brains import RationalBrain
from Brains import AdaptiveBrain

np.set_printoptions(linewidth=300)


class Johnny:
    """
    self.bot_name = bot_name in config file. Corresponds to PLAYER_X_NAME field.

    self.hand = a dictionary storing properties of the current hand. Has the
    following keys.
        hand_id = int. ID of hand.
        button = boolean. Are we the dealer?
        hole1 = hole card 1
        hole2 = hole card 2
        board = current known board cards
        action_history = a list of the last actions. action_history[i] =
            another list that describes the previous actions up until action
            point i.
        result = int. Chips won (can be a negative integer if we lost chips)

    self.state = A dictionary representing the our state in the current match.
    Has the following keys.
        my_bank = our current bankroll
        their_bank = their current bankroll
        time_bank = cumulative time remaining in the match
    """

    def __init__(self, bot_name="P1", brain=AdaptiveBrain, restore_from=None):
        # FIX: the original default was restore_from=[] -- a mutable default
        # argument shared by every call. Use the None sentinel and substitute
        # a fresh empty list, which preserves the original behavior for
        # callers that omit the argument.
        if restore_from is None:
            restore_from = []
        self.bot_name = bot_name
        self.brain = brain(restore_from)
        self.state = {}
        self.reset_hand()

    def reset_hand(self):
        """Clear all per-hand state before a new hand starts."""
        self.hand = {}
        self.hand['action_history'] = []
        self.hand['pot_size'] = []
        self.temporal_feature_matrix = []
        self.possible_actions = []
        self.brain.new_state = []

    ### ------- PARSING ------- ###

    def parse_data(self, data):
        """Dispatch an engine packet to the matching parser by its type tag."""
        splits = data.split()
        packet_type = splits[0]
        if packet_type == "NEWHAND":
            self.parse_new_hand(splits)
        elif packet_type == "GETACTION":
            self.parse_get_action(splits)
        elif packet_type == "HANDOVER":
            self.parse_hand_over(splits)

    def parse_win_result(self, wr):
        # wr looks like "WIN:amount:winner"; returns our chip delta relative
        # to what we had already committed this hand.
        splits = wr.split(":")
        amt = int(splits[1])
        winner = splits[2]
        if len(self.temporal_feature_matrix) > 0:
            # row 0 holds hero's committed fraction of a 200-chip stack
            # (see build_temporal_feature_vector's STACKSIZE scaling)
            current_stake = self.temporal_feature_matrix[0,-1]*200.0
        else:
            current_stake = 2  # we were the big blind.
if winner == self.bot_name: return amt-current_stake else: return -current_stake def parse_hand_over(self, data_splits): # HANDOVER Stack1 Stack2 numBoardCards [boardCards] numLastActions [lastActions] timeBank # Can ignore Stack1 and Stack2 because we'll get it on the next NEWHAND packet. num_board_cards = int(data_splits[3]) counter = 4 self.hand['board'] = data_splits[counter:counter+num_board_cards] counter += num_board_cards num_last_actions = int(data_splits[counter]) counter += 1 self.hand['action_history'].append(data_splits[counter:counter+num_last_actions-1]) self.hand['winnings'] = self.parse_win_result(data_splits[counter+num_last_actions-1]) counter += num_last_actions self.state['time_bank'] = float(data_splits[-1]) def check_for_hand_update_and_update_hand(self, last_actions): for i in range(len(last_actions)): splits = last_actions[i].split(":") if len(splits) == 4 and splits[0] == "DISCARD": # DISCARD:(oldcard):(newcard):PLAYER if self.hand['hole1'] == splits[1]: self.hand['hole1'] = splits[2] else: self.hand['hole2'] = splits[2] print("Updated hand ... ") print([self.hand['hole1'], self.hand['hole2']]) def parse_get_action(self, data_splits): # GETACTION potSize numBoardCards [boardCards] numLastActions [lastActions] numLegalActions [legalActions] timebank self.hand['pot_size'].append(int(data_splits[1])) num_board_cards = int(data_splits[2]) counter = 3 self.hand['board'] = data_splits[counter:counter+num_board_cards] counter += num_board_cards num_last_actions = int(data_splits[counter]) counter += 1 self.check_for_hand_update_and_update_hand(data_splits[counter:counter+num_last_actions]) # update hand if discard was made. self.hand['action_history'].append(data_splits[counter:counter+num_last_actions]) counter += num_last_actions self.hand['winnings'] = 0 # if we're in a get action packet, then we haven't won anything yet. 
    # NOTE(review): the lines below continue a parse method whose `def` line is
    # above this excerpt; they read the tail of a packet: the count of legal
    # actions, the actions themselves, and the trailing time bank.
    num_legal_actions = int(data_splits[counter])
    counter += 1
    self.possible_actions = data_splits[counter:counter+num_legal_actions]
    counter += num_legal_actions
    self.state['time_bank'] = float(data_splits[-1])

    def parse_new_hand(self, data_splits):
        """Parse a NEWHAND packet and reset all per-hand state."""
        # NEWHAND handId button holeCard1 holeCard2 myBank otherBank timeBank
        self.reset_hand()
        self.hand['hand_id'] = int(data_splits[1])
        self.hand['button'] = data_splits[2]
        self.hand['hole1'] = data_splits[3]
        self.hand['hole2'] = data_splits[4]
        self.state['my_bank'] = int(data_splits[5])
        self.state['their_bank'] = int(data_splits[6])
        self.state['time_bank'] = float(data_splits[7])

    ### ----------------------- ###

    ### ------- FEATURE GENERATION ------- ###
    def update_temporal_feature_matrix(self):
        """Append one feature column per action taken since the last update."""
        # Columns represent time steps in a hand.
        # Rows are as follows:
        # 0 - hero action
        # 1 - villain action
        # 2 - street
        # 3 - hero discard?
        # 4 - villain discard?
        al = [item for sublist in self.hand['action_history'] for item in sublist]  # linearize action history list
        if len(al) > 0:
            if len(self.temporal_feature_matrix) > 0:  # check if it's started to fill out.
                # Matrix already holds this many columns; only append the rest.
                start_from_idx = self.temporal_feature_matrix.shape[1]
            else:
                # First action of the hand seeds the matrix with one column.
                self.temporal_feature_matrix = self.build_temporal_feature_vector(al[0])
                start_from_idx = 1
            for i in range(start_from_idx, len(al)):
                self.temporal_feature_matrix = np.hstack((self.temporal_feature_matrix, self.build_temporal_feature_vector(al[i])))

    def build_temporal_feature_vector(self, performed_action):
        """Build a (5, 1) feature column for one action string from the engine."""
        # Performed actions to expect.
        # BET:amount[:actor]
        # CALL[:actor]
        # CHECK[:actor]
        # DEAL:STREET
        # FOLD[:actor]
        # POST:amount:actor
        # DISCARD[:actor]
        # RAISE:amount[:actor]
        # REFUND:amount:actor
        # SHOW:card1:card2:actor
        # TIE:amount:actor
        # WIN:amount:actor
        NFEATURES = 5
        STACKSIZE = 200.0  # bet amounts below are normalized by this stack size
        hero_idx = 0
        villain_idx = 1
        street_idx = 2
        hero_discard_idx = 3
        villain_discard_idx = 4
        street = self.get_street()
        splits = performed_action.split(":")
        fv = np.zeros((NFEATURES,1))
        if splits[0] == "BET":
            actor_idx = self.get_actor_idx(splits[-1])
            amount = float(splits[1])
            # Accumulate on top of the actor's running contribution this hand.
            fv[actor_idx] = amount/STACKSIZE + np.max(self.temporal_feature_matrix[actor_idx])
            fv[1-actor_idx] = self.temporal_feature_matrix[1-actor_idx,-1]
            fv[street_idx] = street
        elif splits[0] == "CALL":
            actor_idx = self.get_actor_idx(splits[-1])
            player_to_call = 1 - actor_idx
            # A call matches the opponent's largest contribution so far.
            call_amt = np.max(self.temporal_feature_matrix[player_to_call])
            fv[actor_idx] = call_amt
            fv[1-actor_idx] = self.temporal_feature_matrix[1-actor_idx,-1]
            fv[street_idx] = street
        elif splits[0] == "CHECK":
            actor_idx = self.get_actor_idx(splits[-1])
            # Checking carries both players' latest values forward unchanged.
            fv[actor_idx] = self.temporal_feature_matrix[actor_idx,-1]
            fv[1-actor_idx] = self.temporal_feature_matrix[1-actor_idx,-1]
            fv[street_idx] = street
        elif splits[0] == "DEAL":
            # Street encoding: 0=preflop (default), 1=flop, 2=turn, 3=river.
            if splits[1] == "FLOP":
                street = 1
            elif splits[1] == "TURN":
                street = 2
            elif splits[1] == "RIVER":
                street = 3
            fv[street_idx] = street
            fv[0] = self.temporal_feature_matrix[0,-1]
            fv[1] = self.temporal_feature_matrix[1,-1]
        elif splits[0] == "FOLD":
            # Hand is now over. nothing to do here.
            pass
        elif splits[0] == "POST":
            actor_idx = self.get_actor_idx(splits[-1])
            amount = float(splits[1])/STACKSIZE
            fv[actor_idx] = amount
            fv[street_idx] = street
            # Blinds may be the first action of the hand, when the matrix is
            # still empty — only copy the opponent's value if one exists.
            if len(self.temporal_feature_matrix) > 0:
                fv[1-actor_idx] = self.temporal_feature_matrix[1-actor_idx,-1]
        elif splits[0] == "DISCARD":
            actor_idx = self.get_actor_idx(splits[-1])
            # Rows 3/4 are the discard flags (actor_idx+3 maps hero->3, villain->4).
            fv[actor_idx+3] = 1
            fv[street_idx] = street
            fv[actor_idx] = self.temporal_feature_matrix[actor_idx,-1]
            fv[1-actor_idx] = self.temporal_feature_matrix[1-actor_idx,-1]
        elif splits[0] == "RAISE":
            # Raise specifies the amount raised to, not the amount raised.
            # This creates some complications with respect to maintaining the
            # temporal feature matrix.
            # Basically, we need to add the raise value to the latest pot value from
            # the previous street, since multiple raises and re-raises specify
            # only the amount raised to.
            actor_idx = self.get_actor_idx(splits[-1])
            max_of_prev_street = self.get_max_of_prev_street(actor_idx)
            amount = float(splits[1])/STACKSIZE + max_of_prev_street
            fv[actor_idx] = amount
            fv[1-actor_idx] = self.temporal_feature_matrix[1-actor_idx,-1]
            fv[street_idx] = street
        elif splits[0] == "REFUND":
            # Hand is now over. nothing to do here.
            pass
        elif splits[0] == "SHOW":
            # Hand is now over. nothing to do here.
            pass
        elif splits[0] == "TIE":
            # Hand is now over. nothing to do here.
            pass
        elif splits[0] == "WIN":
            # Hand is now over. nothing to do here.
            pass
        # Difference betting results?
        # if True and len(self.temporal_feature_matrix) > 0:
        #     fv[0] -= self.temporal_feature_matrix[0,-1]
        #     fv[1] -= self.temporal_feature_matrix[1,-1]
        return fv

    def get_street(self):
        """Return the current street (0-3) from row 2 of the feature matrix."""
        if len(self.temporal_feature_matrix) == 0:  # if has not been initialized
            street = 0  # preflop
        else:
            street = np.max(self.temporal_feature_matrix[2])
        return street

    def get_max_of_prev_street(self, actor_idx):
        """Return actor's largest contribution recorded on the previous street."""
        street = self.get_street()
        if street == 0:
            max_of_prev_street = 0
        else:
            # Mask out only the columns belonging to the previous street.
            mask = self.temporal_feature_matrix[2] == street - 1
            max_of_prev_street = np.max(self.temporal_feature_matrix[actor_idx, mask])
        return max_of_prev_street

    def get_actor_idx(self, actor):
        """Map an actor name to a matrix row: 0 for this bot, 1 for the villain."""
        if actor == self.bot_name:
            actor_idx = 0
        else:
            actor_idx = 1
        return actor_idx

    def check_synchrony_to_brain(self):
        """Debug check that the brain's view of the state matches this matrix."""
        if len(self.brain.new_state) > 0:
            ns = self.brain.new_state[0]
            print(ns)
            print(self.temporal_feature_matrix[:ns.shape[0]])
            states_same = np.array_equal(ns, self.temporal_feature_matrix[:ns.shape[0]])
            # NOTE(review): `assert()` asserts an empty tuple, which is always
            # falsy, so this raises AssertionError unconditionally — presumably
            # `assert states_same` was intended. Confirm before enabling.
            assert()

    ### ------------------------------- ###

    def run(self, input_socket):
        """Main packet loop: read engine packets, update state, reply with actions."""
        # NOTE(review): this method replies on the module-global socket `s`
        # rather than `input_socket` — works only when launched via __main__.
        # NOTE(review): the bare `print "..."` statements below are Python 2
        # syntax while the rest of this file uses `print(...)`; the file will
        # not parse under Python 3 as written.
        # Get a file-object for reading packets from the socket.
        # Using this ensures that you get exactly one packet per read.
        f_in = input_socket.makefile()
        while True:
            # Block until the engine sends us a packet.
            data = f_in.readline().strip()
            # If data is None, connection has closed.
            if not data:
                print "Gameover, engine disconnected."
                break
            # Here is where you should implement code to parse the packets from
            # the engine and act on it. We are just printing it instead.
            print(data)
            self.parse_data(data)
            self.update_temporal_feature_matrix()
            #self.check_synchrony_to_brain()
            # First before taking our next action, let's learn from the move we
            # made at the last decision point.
            self.brain.learn_from_last_action(self)
            # When appropriate, reply to the engine with a legal action.
            # The engine will ignore all spurious responses.
            # The engine will also check/fold for you if you return an
            # illegal action.
            # When sending responses, terminate each response with a newline
            # character (\n) or your bot will hang!
            word = data.split()[0]
            if word == "GETACTION":
                action = self.brain.make_decision(self)
                s.send(action + "\n")
            elif word == "REQUESTKEYVALUES":
                self.brain.Q.save(self.bot_name + '.h5')
                # At the end, the engine will allow your bot save key/value pairs.
                # Send FINISH to indicate you're done.
                s.send("FINISH\n")
        # Clean up the socket.
        s.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='A Pokerbot.', add_help=False, prog='pokerbot')
    parser.add_argument('-h', dest='host', type=str, default='localhost', help='Host to connect to, defaults to localhost')
    parser.add_argument('port', metavar='PORT', type=int, help='Port on host to connect to')
    args = parser.parse_args()

    # Create a socket connection to the engine.
    print 'Connecting to %s:%d' % (args.host, args.port)
    try:
        s = socket.create_connection((args.host, args.port))
    except socket.error as e:
        print 'Error connecting! Aborting'
        exit()

    bot = Johnny()
    bot.run(s)
#!/usr/bin/env python3
"""
Handles interactive prompts, part of the Old Spice Voicemail Generator.
Each function prompts the user for a different piece of information.

Functions: gender, phone, reason, ending, filename, show_settings, confirm
"""
import sys

__author__ = 'Tony Zhaocheng Tan'
__copyright__ = "Copyright 2015, Tony Zhaocheng Tan"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "tony@tonytan.io"

male_reasons = {'a': "Building an orphanage for children with their bare hands"
                     " while playing a sweet, sweet lullaby for those children"
                     " with two mallets against their abs xylophone.",
                'b': "Cracking walnuts with their man mind.",
                'c': "Polishing their monocle smile.",
                'd': "Ripping out mass loads of weights."}

female_reasons = {'a': "Ingesting my delicious Old Spice man smell.",
                  'b': "Listening to me read romantic poetry"
                       " while I make a bouquet of paper flowers from each read page.",
                  'c': "Enjoying a lobster dinner I prepared just for her while carrying her"
                       " on my back safely through piranha infested waters.",
                  'd': "Being serenaded on the moon with the view of the earth"
                       " while surviving off the oxygen in my lungs via a passionate kiss.",
                  'e': "Riding a horse backwards with me."}

male_endings = {'a': "I'm on a horse.",
                'b': "Do do do doot doo do do dooot.",
                'c': "I'm on a phone.",
                'd': "SWAN DIVE.",
                'e': "This voicemail is now diamonds."}

female_endings = {'a': "But she'll get back to you as soon as she can.",
                 'b': "Thanks for calling."}


def gender():
    """
    Prompts the user to choose "male" or "female".

    :return: Returns "m" or "f"
    """
    while True:
        gender = input("Would you like the: \n [1] Male version \n [2] Female version \n>")
        if gender in ("1", "m", "M", "male", "Male"):
            return "m"
        elif gender in ("2", "f", "F", "female", "Female"):
            return "f"
        else:
            print("Invalid input. \nPlease make sure you are entering the number of your choice.")


def phone():
    """
    Prompts the user to enter a 10-digit phone number.

    Ignoring all input that is not a number, the function makes sure that
    the number does not start with "0" or "1", and then checks to see if
    the number is exactly 10 digits long.

    :return: Returns the number as a string.
    """
    while True:
        number_raw = str(input("Please enter your 10-digit phone number: \n>"))
        # Keep only ASCII digit characters from the raw input.
        number = "".join(char for char in number_raw if char in "0123456789")
        # The number must contain at least one digit other than 0 or 1,
        # otherwise the leading-digit strip below would exhaust the string.
        have_number = any(char in "23456789" for char in number)
        if have_number:
            # U.S. area codes do not start with "0" or "1"; therefore they
            # are removed from the beginning.
            while number[0] in ("0", "1"):
                number = number[1:]
            if len(number) == 10:
                return number
        # FIX: the original message read "phone numberstarting" — the two
        # implicitly concatenated literals were missing a separating space.
        print("Invalid input. \nPlease make sure you are entering a valid 10-digit phone number "
              "starting with your area code.")


def reason(gender):
    """
    Prompts the user to select reasons to be included in the voicemail message.

    :param gender: 'm' or 'f'.
    :return: Returns the selected reasons as letters in a list.
    """
    reasons = []
    reason_num = 1  # How many reasons are being selected.
    # Letters valid for this gender; 'e' exists only in the female set.
    valid_choices = ("a", "b", "c", "d", "e") if gender == "f" else ("a", "b", "c", "d")
    while True:
        done = False  # Represents whether the reason is done being selected.
        while not done:
            print("Please select reason number " + str(reason_num) + ":")
            if gender == "m":
                print("[a]", male_reasons['a'], "\n[b]", male_reasons['b'],
                      "\n[c]", male_reasons['c'], "\n[d]", male_reasons['d'])
            elif gender == "f":
                print("[a]", female_reasons['a'], "\n[b]", female_reasons['b'],
                      "\n[c]", female_reasons['c'], "\n[d]", female_reasons['d'],
                      "\n[e]", female_reasons['e'])
            reason = input(">").lower()
            # Checks if the selected reason is valid for the gender
            # (case-insensitively, matching the original compound condition).
            if reason not in valid_choices:
                print("Invalid input. \nPlease make sure you are entering the letter of your choice.")
            else:
                reasons.append(reason)
                done = True  # The reason is done being selected.
        done = False  # Represents if the question is done being answered.
        while not done:
            print("You have selected", str(reason_num), "reason(s).")
            more = input("Would you like to select more reasons? [yes/no] \n>")
            if more in ("yes", "y", "YES", "Y", "Yes"):
                reason_num += 1
                done = True  # Question answered, now loop will return to top to ask for next reason.
            elif more in ("no", "n", "NO", "N", "No"):
                return reasons  # Reason selection over, return list of reasons.
            else:
                print("Invalid input. \nPlease make sure you are entering 'yes' or 'no'.")


def ending(gender):
    """
    Prompts the user to select endings to be included in the voicemail message.

    :param gender: 'm' or 'f'.
    :return: Returns the selected endings as letters in a list.
    """
    # NOTE: the original defined five unused ending1..ending5 locals here that
    # duplicated the module-level dicts; they have been removed (dead code).
    endings = []
    ending_num = 1  # How many endings are being selected.
    # Letters valid for this gender; 'c'-'e' exist only in the male set.
    valid_choices = ("a", "b", "c", "d", "e") if gender == "m" else ("a", "b")
    while True:
        done = False  # Represents whether the ending is done being selected.
        while not done:
            print("Please select ending number " + str(ending_num) + ":")
            if gender == "m":
                print("[a]", male_endings['a'], "\n[b]", male_endings['b'],
                      "\n[c]", male_endings['c'], "\n[d]", male_endings['d'],
                      "\n[e]", male_endings['e'])
            elif gender == "f":
                print("[a]", female_endings['a'], "\n[b]", female_endings['b'])
            end = input(">").lower()
            # Checks if the selected ending is valid for the gender
            # (case-insensitively, matching the original compound condition).
            if end not in valid_choices:
                print("Invalid input. \nPlease make sure you are entering the letter of your choice.")
            else:
                endings.append(end)
                done = True  # The ending is done being selected.
        done = False  # Represents if the question is done being answered.
        while not done:
            print("You have selected " + str(ending_num) + " ending(s).")
            more = input("Would you like to select more endings? [yes/no] \n>")
            if more in ("yes", "y", "YES", "Y", "Yes"):
                ending_num += 1
                done = True  # Question answered, now loop will return to top to ask for next reason.
            elif more in ("no", "n", "NO", "N", "No"):
                return endings  # Ending selection over, return list of reasons.
            else:
                print("Invalid input. \nPlease make sure you are entering 'yes' or 'no'.")


def filename():
    """
    Prompts the user for the desired file name of the output mp3 file.
    Checks to make sure the name is alphanumeric.

    :return: Returns the name as a string.
    """
    while True:
        name = str(input("What would you like the audio file to be named? "
                         "(You do not need to include the extension.)\n>"))
        if name.isalnum():
            return name
        else:
            print("Invalid input. Please check to make sure that you have only entered letters and/or numbers.")


def _print_selected(choices, mapping, kind):
    """
    Print the full text for each selected letter, exiting on an unknown one.

    :param choices: list of selected letters.
    :param mapping: the reasons/endings dict for the chosen gender.
    :param kind: "reason" or "ending" (used in the error message).
    """
    for choice in choices:
        if choice in mapping:
            print(mapping[choice])
        else:
            print(f"Get Name Error! (Invalid {kind})")
            sys.exit()


def show_settings(gender, phone, reasons, endings):
    """
    Prints out all the settings that the user has selected.

    :param gender: 'm' or 'f'.
    :param phone: a string that represents the phone number,
        consisting of exactly 10 digits. ('9876543210')
    :param reasons: a list of all the reasons a user selected,
        with each reason represented by a letter. (['a', 'c'])
    :param endings: a list of all the endings a user selected,
        with each ending represented by a letter. (['a', 'b'])
    :return: Nothing is returned.
    """
    print("\nHere are the options you have selected:")
    if str(gender) == 'm':
        gender = "Male"
    elif str(gender) == 'f':
        gender = "Female"
    print("Gender:", gender)
    print("Phone number:", phone)
    print("\nYou have selected the following reasons:")
    if gender == "Male":
        _print_selected(reasons, male_reasons, "reason")
        print("\nYou have selected the following endings:")
        _print_selected(endings, male_endings, "ending")
    elif gender == "Female":
        _print_selected(reasons, female_reasons, "reason")
        print("\nYou have selected the following endings:")
        _print_selected(endings, female_endings, "ending")
    return


def confirm(gender, phone, reasons, endings):
    """
    Asks the user to confirm the settings that have been selected.

    :param gender: 'm' or 'f'.
    :param phone: a string that represents the phone number,
        consisting of exactly 10 digits. ('9876543210')
    :param reasons: a list of all the reasons a user selected,
        with each reason represented by a letter. (['a', 'c'])
    :param endings: a list of all the endings a user selected,
        with each ending represented by a letter. (['a', 'b'])
    :return: Returns True if the user has confirmed the settings.
        Returns False if the user wishes to start over.
    """
    show_settings(gender, phone, reasons, endings)
    while True:
        done = input("\nIs the above information correct?[yes/no] \n>")
        if done in ("yes", "y", "YES", "Y", "Yes"):
            return True
        elif done in ("no", "n", "NO", "N", "No"):
            return False
        else:
            print("Invalid input. \nPlease make sure you are entering 'yes' or 'no'.")
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

import logging
from collections import defaultdict
from dataclasses import dataclass

from pants.backend.go.target_types import (
    GoBinaryDependenciesField,
    GoBinaryMainPackage,
    GoBinaryMainPackageField,
    GoBinaryMainPackageRequest,
    GoImportPathField,
    GoModSourcesField,
    GoModTarget,
    GoPackageSourcesField,
    GoThirdPartyPackageDependenciesField,
    GoThirdPartyPackageTarget,
)
from pants.backend.go.util_rules import first_party_pkg, import_analysis
from pants.backend.go.util_rules.first_party_pkg import (
    FallibleFirstPartyPkgAnalysis,
    FirstPartyPkgAnalysisRequest,
    FirstPartyPkgImportPath,
    FirstPartyPkgImportPathRequest,
)
from pants.backend.go.util_rules.go_mod import GoModInfo, GoModInfoRequest
from pants.backend.go.util_rules.import_analysis import GoStdLibImports
from pants.backend.go.util_rules.third_party_pkg import (
    AllThirdPartyPackages,
    AllThirdPartyPackagesRequest,
    ThirdPartyPkgAnalysis,
    ThirdPartyPkgAnalysisRequest,
)
from pants.base.exceptions import ResolveError
from pants.base.specs import AddressSpecs, SiblingAddresses
from pants.engine.addresses import Address, AddressInput
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
    AllTargets,
    GeneratedTargets,
    GenerateTargetsRequest,
    InferDependenciesRequest,
    InferredDependencies,
    InjectDependenciesRequest,
    InjectedDependencies,
    InvalidFieldException,
    Targets,
    WrappedTarget,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel

logger = logging.getLogger(__name__)


class AllGoTargets(Targets):
    pass


@rule(desc="Find all Go targets in project", level=LogLevel.DEBUG)
def find_all_go_targets(tgts: AllTargets) -> AllGoTargets:
    # Go targets are identified by either an explicit import path
    # (third-party / generated targets) or first-party package sources.
    return AllGoTargets(
        t for t in tgts if t.has_field(GoImportPathField) or t.has_field(GoPackageSourcesField)
    )


@dataclass(frozen=True)
class ImportPathToPackages:
    # Maps a Go import path to every target address that provides it; more
    # than one address for a path means the mapping is ambiguous.
    mapping: FrozenDict[str, tuple[Address, ...]]


@rule(desc="Map all Go targets to their import paths", level=LogLevel.DEBUG)
async def map_import_paths_to_packages(go_tgts: AllGoTargets) -> ImportPathToPackages:
    mapping: dict[str, list[Address]] = defaultdict(list)
    first_party_addresses = []
    first_party_gets = []
    for tgt in go_tgts:
        if tgt.has_field(GoImportPathField):
            # Third-party/generated targets carry their import path directly.
            import_path = tgt[GoImportPathField].value
            mapping[import_path].append(tgt.address)
        else:
            # First-party packages need their import path computed; batch the
            # requests so they resolve concurrently via MultiGet below.
            first_party_addresses.append(tgt.address)
            first_party_gets.append(
                Get(FirstPartyPkgImportPath, FirstPartyPkgImportPathRequest(tgt.address))
            )
    first_party_import_paths = await MultiGet(first_party_gets)
    for import_path_info, addr in zip(first_party_import_paths, first_party_addresses):
        mapping[import_path_info.import_path].append(addr)
    frozen_mapping = FrozenDict({ip: tuple(tgts) for ip, tgts in mapping.items()})
    return ImportPathToPackages(frozen_mapping)


class InferGoPackageDependenciesRequest(InferDependenciesRequest):
    infer_from = GoPackageSourcesField


@rule(desc="Infer dependencies for first-party Go packages", level=LogLevel.DEBUG)
async def infer_go_dependencies(
    request: InferGoPackageDependenciesRequest,
    std_lib_imports: GoStdLibImports,
    package_mapping: ImportPathToPackages,
) -> InferredDependencies:
    addr = request.sources_field.address
    maybe_pkg_analysis = await Get(
        FallibleFirstPartyPkgAnalysis, FirstPartyPkgAnalysisRequest(addr)
    )
    if maybe_pkg_analysis.analysis is None:
        # Analysis failed (e.g. unparseable Go source); log and infer nothing
        # rather than erroring the whole dependency-inference run.
        logger.error(
            f"Failed to analyze {maybe_pkg_analysis.import_path} for dependency inference:\n"
            f"{maybe_pkg_analysis.stderr}"
        )
        return InferredDependencies([])
    pkg_analysis = maybe_pkg_analysis.analysis

    inferred_dependencies = []
    for import_path in (
        *pkg_analysis.imports,
        *pkg_analysis.test_imports,
        *pkg_analysis.xtest_imports,
    ):
        if import_path in std_lib_imports:
            continue
        # Avoid a dependency cycle caused by external test imports of this
        # package (i.e., "xtest").
        if import_path == pkg_analysis.import_path:
            continue
        candidate_packages = package_mapping.mapping.get(import_path, ())
        if len(candidate_packages) > 1:
            # TODO(#12761): Use ExplicitlyProvidedDependencies for disambiguation.
            logger.warning(
                f"Ambiguous mapping for import path {import_path} on packages at addresses: {candidate_packages}"
            )
        elif len(candidate_packages) == 1:
            inferred_dependencies.append(candidate_packages[0])
        else:
            logger.debug(
                f"Unable to infer dependency for import path '{import_path}' "
                f"in go_package at address '{addr}'."
            )

    return InferredDependencies(inferred_dependencies)


class InjectGoThirdPartyPackageDependenciesRequest(InjectDependenciesRequest):
    inject_for = GoThirdPartyPackageDependenciesField


@rule(desc="Infer dependencies for third-party Go packages", level=LogLevel.DEBUG)
async def inject_go_third_party_package_dependencies(
    request: InjectGoThirdPartyPackageDependenciesRequest,
    std_lib_imports: GoStdLibImports,
    package_mapping: ImportPathToPackages,
) -> InjectedDependencies:
    addr = request.dependencies_field.address
    # The generated third-party target's go.mod info lives on its generator.
    go_mod_address = addr.maybe_convert_to_target_generator()
    wrapped_target, go_mod_info = await MultiGet(
        Get(WrappedTarget, Address, addr),
        Get(GoModInfo, GoModInfoRequest(go_mod_address)),
    )
    tgt = wrapped_target.target
    pkg_info = await Get(
        ThirdPartyPkgAnalysis,
        ThirdPartyPkgAnalysisRequest(
            tgt[GoImportPathField].value, go_mod_info.digest, go_mod_info.mod_path
        ),
    )

    inferred_dependencies = []
    for import_path in pkg_info.imports:
        if import_path in std_lib_imports:
            continue

        candidate_packages = package_mapping.mapping.get(import_path, ())
        if len(candidate_packages) > 1:
            # TODO(#12761): Use ExplicitlyProvidedDependencies for disambiguation.
            logger.warning(
                f"Ambiguous mapping for import path {import_path} on packages at addresses: {candidate_packages}"
            )
        elif len(candidate_packages) == 1:
            inferred_dependencies.append(candidate_packages[0])
        else:
            logger.debug(
                f"Unable to infer dependency for import path '{import_path}' "
                f"in go_third_party_package at address '{addr}'."
            )

    return InjectedDependencies(inferred_dependencies)


# -----------------------------------------------------------------------------------------------
# Generate `go_third_party_package` targets
# -----------------------------------------------------------------------------------------------


class GenerateTargetsFromGoModRequest(GenerateTargetsRequest):
    generate_from = GoModTarget


@rule(desc="Generate `go_third_party_package` targets from `go_mod` target", level=LogLevel.DEBUG)
async def generate_targets_from_go_mod(
    request: GenerateTargetsFromGoModRequest,
    union_membership: UnionMembership,
) -> GeneratedTargets:
    generator_addr = request.generator.address
    go_mod_info = await Get(GoModInfo, GoModInfoRequest(request.generator[GoModSourcesField]))
    all_packages = await Get(
        AllThirdPartyPackages,
        AllThirdPartyPackagesRequest(go_mod_info.digest, go_mod_info.mod_path),
    )

    def create_tgt(pkg_info: ThirdPartyPkgAnalysis) -> GoThirdPartyPackageTarget:
        # One generated target per third-party package in the module graph.
        return GoThirdPartyPackageTarget(
            {GoImportPathField.alias: pkg_info.import_path},
            # E.g. `src/go:mod#github.com/google/uuid`.
            generator_addr.create_generated(pkg_info.import_path),
            union_membership,
            residence_dir=generator_addr.spec_path,
        )

    return GeneratedTargets(
        request.generator,
        (create_tgt(pkg_info) for pkg_info in all_packages.import_paths_to_pkg_info.values()),
    )


# -----------------------------------------------------------------------------------------------
# The `main` field for `go_binary`
# -----------------------------------------------------------------------------------------------


@rule(desc="Determine first-party package used by `go_binary` target", level=LogLevel.DEBUG)
async def determine_main_pkg_for_go_binary(
    request: GoBinaryMainPackageRequest,
) -> GoBinaryMainPackage:
    addr = request.field.address
    if request.field.value:
        # The user explicitly set `main`; it must resolve to a `go_package`.
        wrapped_specified_tgt = await Get(
            WrappedTarget,
            AddressInput,
            AddressInput.parse(request.field.value, relative_to=addr.spec_path),
        )
        if not wrapped_specified_tgt.target.has_field(GoPackageSourcesField):
            raise InvalidFieldException(
                f"The {repr(GoBinaryMainPackageField.alias)} field in target {addr} must point to "
                "a `go_package` target, but was the address for a "
                f"`{wrapped_specified_tgt.target.alias}` target.\n\n"
                "Hint: you should normally not specify this field so that Pants will find the "
                "`go_package` target for you."
            )
        return GoBinaryMainPackage(wrapped_specified_tgt.target.address)

    # No explicit `main`: look for exactly one sibling `go_package` target.
    candidate_targets = await Get(Targets, AddressSpecs([SiblingAddresses(addr.spec_path)]))
    relevant_pkg_targets = [
        tgt
        for tgt in candidate_targets
        if tgt.has_field(GoPackageSourcesField) and tgt.residence_dir == addr.spec_path
    ]
    if len(relevant_pkg_targets) == 1:
        return GoBinaryMainPackage(relevant_pkg_targets[0].address)

    wrapped_tgt = await Get(WrappedTarget, Address, addr)
    alias = wrapped_tgt.target.alias
    if not relevant_pkg_targets:
        raise ResolveError(
            f"The `{alias}` target {addr} requires that there is a `go_package` "
            f"target defined in its directory {addr.spec_path}, but none were found.\n\n"
            "To fix, add a target like `go_package()` or `go_package(name='pkg')` to the BUILD "
            f"file in {addr.spec_path}."
        )
    # NOTE(review): the message below is missing a closing backtick after
    # `{addr}` — cosmetic only, left unchanged here.
    raise ResolveError(
        f"There are multiple `go_package` targets for the same directory of the "
        f"`{alias}` target {addr}: {addr.spec_path}. It is ambiguous what to use as the `main` "
        "package.\n\n"
        f"To fix, please either set the `main` field for `{addr} or remove these "
        "`go_package` targets so that only one remains: "
        f"{sorted(tgt.address.spec for tgt in relevant_pkg_targets)}"
    )


class InjectGoBinaryMainDependencyRequest(InjectDependenciesRequest):
    inject_for = GoBinaryDependenciesField


@rule
async def inject_go_binary_main_dependency(
    request: InjectGoBinaryMainDependencyRequest,
) -> InjectedDependencies:
    # Make a `go_binary` always depend on its (explicit or discovered) main package.
    wrapped_tgt = await Get(WrappedTarget, Address, request.dependencies_field.address)
    main_pkg = await Get(
        GoBinaryMainPackage,
        GoBinaryMainPackageRequest(wrapped_tgt.target[GoBinaryMainPackageField]),
    )
    return InjectedDependencies([main_pkg.address])


def rules():
    # Registers this module's rules plus the union rules that hook dependency
    # inference/injection and target generation into the engine.
    return (
        *collect_rules(),
        *first_party_pkg.rules(),
        *import_analysis.rules(),
        UnionRule(InferDependenciesRequest, InferGoPackageDependenciesRequest),
        UnionRule(InjectDependenciesRequest, InjectGoThirdPartyPackageDependenciesRequest),
        UnionRule(InjectDependenciesRequest, InjectGoBinaryMainDependencyRequest),
        UnionRule(GenerateTargetsRequest, GenerateTargetsFromGoModRequest),
    )
# Copyright (c) 1996-2015 PSERC. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. """Tests for step-controlled PIPS-based AC optimal power flow. """ from os.path import dirname, join from numpy import array, ones, zeros, Inf, r_, ix_, argsort, arange from scipy.io import loadmat from scipy.sparse import spdiags, csr_matrix as sparse from pypower.ppoption import ppoption from pypower.runopf import runopf from pypower.loadcase import loadcase from pypower.opf import opf from pypower.idx_bus import \ BUS_AREA, BASE_KV, VMIN, VM, VA, LAM_P, LAM_Q, MU_VMIN, MU_VMAX from pypower.idx_gen import \ GEN_BUS, QMAX, QMIN, MBASE, APF, PG, QG, VG, MU_PMAX, MU_QMIN, \ PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX from pypower.idx_brch import \ ANGMAX, PF, QT, MU_SF, MU_ST, MU_ANGMAX, MU_ANGMIN, ANGMIN from pypower.idx_cost import NCOST from pypower.t.t_begin import t_begin from pypower.t.t_is import t_is from pypower.t.t_ok import t_ok from pypower.t.t_end import t_end def t_opf_pips_sc(quiet=False): """Tests for step-controlled PIPS-based AC optimal power flow. 
@author: Ray Zimmerman (PSERC Cornell) """ num_tests = 101 t_begin(num_tests, quiet) tdir = dirname(__file__) casefile = join(tdir, 't_case9_opf') verbose = 0#not quiet t0 = 'PIPS-sc : ' ppopt = ppoption(OPF_VIOLATION=1e-6, PDIPM_GRADTOL=1e-8, PDIPM_COMPTOL=1e-8, PDIPM_COSTTOL=1e-9) ppopt = ppoption(ppopt, OUT_ALL=0, VERBOSE=verbose, OPF_ALG=565) ## set up indices ib_data = r_[arange(BUS_AREA + 1), arange(BASE_KV, VMIN + 1)] ib_voltage = arange(VM, VA + 1) ib_lam = arange(LAM_P, LAM_Q + 1) ib_mu = arange(MU_VMAX, MU_VMIN + 1) ig_data = r_[[GEN_BUS, QMAX, QMIN], arange(MBASE, APF + 1)] ig_disp = array([PG, QG, VG]) ig_mu = arange(MU_PMAX, MU_QMIN + 1) ibr_data = arange(ANGMAX + 1) ibr_flow = arange(PF, QT + 1) ibr_mu = array([MU_SF, MU_ST]) ibr_angmu = array([MU_ANGMIN, MU_ANGMAX]) ## get solved AC power flow case from MAT-file soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True) ## defines bus_soln, gen_soln, branch_soln, f_soln bus_soln = soln9_opf['bus_soln'] gen_soln = soln9_opf['gen_soln'] branch_soln = soln9_opf['branch_soln'] f_soln = soln9_opf['f_soln'][0] ## run OPF t = t0 r = runopf(casefile, ppopt) bus, gen, branch, f, success = \ r['bus'], r['gen'], r['branch'], r['f'], r['success'] t_ok(success, [t, 'success']) t_is(f, f_soln, 3, [t, 'f']) t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data']) t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage']) t_is( bus[:, ib_lam ], bus_soln[:, ib_lam ], 3, [t, 'bus lambda']) t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 2, [t, 'bus mu']) t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data']) t_is( gen[:, ig_disp ], gen_soln[:, ig_disp ], 3, [t, 'gen dispatch']) t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu']) t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data']) t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow']) t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu']) ## run with 
automatic conversion of single-block pwl to linear costs t = ''.join([t0, '(single-block PWL) : ']) ppc = loadcase(casefile) ppc['gencost'][2, NCOST] = 2 r = runopf(ppc, ppopt) bus, gen, branch, f, success = \ r['bus'], r['gen'], r['branch'], r['f'], r['success'] t_ok(success, [t, 'success']) t_is(f, f_soln, 3, [t, 'f']) t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data']) t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage']) t_is( bus[:, ib_lam ], bus_soln[:, ib_lam ], 3, [t, 'bus lambda']) t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 2, [t, 'bus mu']) t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data']) t_is( gen[:, ig_disp ], gen_soln[:, ig_disp ], 3, [t, 'gen dispatch']) t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu']) t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data']) t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow']) t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu']) xr = r_[r['var']['val']['Va'], r['var']['val']['Vm'], r['var']['val']['Pg'], r['var']['val']['Qg'], 0, r['var']['val']['y']] t_is(r['x'], xr, 8, [t, 'check on raw x returned from OPF']) ## get solved AC power flow case from MAT-file soln9_opf_Plim = loadmat(join(tdir, 'soln9_opf_Plim.mat'), struct_as_record=True) ## defines bus_soln, gen_soln, branch_soln, f_soln bus_soln = soln9_opf_Plim['bus_soln'] gen_soln = soln9_opf_Plim['gen_soln'] branch_soln = soln9_opf_Plim['branch_soln'] f_soln = soln9_opf_Plim['f_soln'][0] ## run OPF with active power line limits t = ''.join([t0, '(P line lim) : ']) ppopt1 = ppoption(ppopt, OPF_FLOW_LIM=1) r = runopf(casefile, ppopt1) bus, gen, branch, f, success = \ r['bus'], r['gen'], r['branch'], r['f'], r['success'] t_ok(success, [t, 'success']) t_is(f, f_soln, 3, [t, 'f']) t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data']) t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage']) t_is( bus[:, ib_lam 
], bus_soln[:, ib_lam ], 3, [t, 'bus lambda']) t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 2, [t, 'bus mu']) t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data']) t_is( gen[:, ig_disp ], gen_soln[:, ig_disp ], 3, [t, 'gen dispatch']) t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu']) t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data']) t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow']) t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu']) ##----- test OPF with quadratic gen costs moved to generalized costs ----- ppc = loadcase(casefile) ppc['gencost'] = array([ [2, 1500, 0, 3, 0.11, 5, 0], [2, 2000, 0, 3, 0.085, 1.2, 0], [2, 3000, 0, 3, 0.1225, 1, 0] ]) r = runopf(ppc, ppopt) bus_soln, gen_soln, branch_soln, f_soln, success = \ r['bus'], r['gen'], r['branch'], r['f'], r['success'] branch_soln = branch_soln[:, :MU_ST + 1] A = None l = array([]) u = array([]) nb = ppc['bus'].shape[0] # number of buses ng = ppc['gen'].shape[0] # number of gens thbas = 0; thend = thbas + nb vbas = thend; vend = vbas + nb pgbas = vend; pgend = pgbas + ng # qgbas = pgend; qgend = qgbas + ng nxyz = 2 * nb + 2 * ng N = sparse((ppc['baseMVA'] * ones(ng), (arange(ng), arange(pgbas, pgend))), (ng, nxyz)) fparm = ones((ng, 1)) * array([[1, 0, 0, 1]]) ix = argsort(ppc['gen'][:, 0]) H = 2 * spdiags(ppc['gencost'][ix, 4], 0, ng, ng, 'csr') Cw = ppc['gencost'][ix, 5] ppc['gencost'][:, 4:7] = 0 ## run OPF with quadratic gen costs moved to generalized costs t = ''.join([t0, 'w/quadratic generalized gen cost : ']) r = opf(ppc, A, l, u, ppopt, N, fparm, H, Cw) f, bus, gen, branch, success = \ r['f'], r['bus'], r['gen'], r['branch'], r['success'] t_ok(success, [t, 'success']) t_is(f, f_soln, 3, [t, 'f']) t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data']) t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage']) t_is( bus[:, ib_lam ], bus_soln[:, ib_lam ], 3, [t, 'bus lambda']) 
t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 2, [t, 'bus mu']) t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data']) t_is( gen[:, ig_disp ], gen_soln[:, ig_disp ], 3, [t, 'gen dispatch']) t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu']) t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data']) t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow']) t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu']) t_is(r['cost']['usr'], f, 12, [t, 'user cost']) ##----- run OPF with extra linear user constraints & costs ----- ## single new z variable constrained to be greater than or equal to ## deviation from 1 pu voltage at bus 1, linear cost on this z ## get solved AC power flow case from MAT-file soln9_opf_extras1 = loadmat(join(tdir, 'soln9_opf_extras1.mat'), struct_as_record=True) ## defines bus_soln, gen_soln, branch_soln, f_soln bus_soln = soln9_opf_extras1['bus_soln'] gen_soln = soln9_opf_extras1['gen_soln'] branch_soln = soln9_opf_extras1['branch_soln'] f_soln = soln9_opf_extras1['f_soln'][0] row = [0, 0, 1, 1] col = [9, 24, 9, 24] A = sparse(([-1, 1, 1, 1], (row, col)), (2, 25)) u = array([Inf, Inf]) l = array([-1, 1]) N = sparse(([1], ([0], [24])), (1, 25)) ## new z variable only fparm = array([[1, 0, 0, 1]]) ## w = r = z H = sparse((1, 1)) ## no quadratic term Cw = array([100.0]) t = ''.join([t0, 'w/extra constraints & costs 1 : ']) r = opf(casefile, A, l, u, ppopt, N, fparm, H, Cw) f, bus, gen, branch, success = \ r['f'], r['bus'], r['gen'], r['branch'], r['success'] t_ok(success, [t, 'success']) t_is(f, f_soln, 3, [t, 'f']) t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data']) t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage']) t_is( bus[:, ib_lam ], bus_soln[:, ib_lam ], 3, [t, 'bus lambda']) t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 2, [t, 'bus mu']) t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data']) t_is( gen[:, ig_disp ], 
gen_soln[:, ig_disp ], 3, [t, 'gen dispatch']) t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu']) t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data']) t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow']) t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu']) t_is(r['var']['val']['z'], 0.025419, 6, [t, 'user variable']) t_is(r['cost']['usr'], 2.5419, 4, [t, 'user cost']) ##----- test OPF with capability curves ----- ppc = loadcase(join(tdir, 't_case9_opfv2')) ## remove angle diff limits ppc['branch'][0, ANGMAX] = 360 ppc['branch'][8, ANGMIN] = -360 ## get solved AC power flow case from MAT-file soln9_opf_PQcap = loadmat(join(tdir, 'soln9_opf_PQcap.mat'), struct_as_record=True) ## defines bus_soln, gen_soln, branch_soln, f_soln bus_soln = soln9_opf_PQcap['bus_soln'] gen_soln = soln9_opf_PQcap['gen_soln'] branch_soln = soln9_opf_PQcap['branch_soln'] f_soln = soln9_opf_PQcap['f_soln'][0] ## run OPF with capability curves t = ''.join([t0, 'w/capability curves : ']) r = runopf(ppc, ppopt) f, bus, gen, branch, success = \ r['f'], r['bus'], r['gen'], r['branch'], r['success'] t_ok(success, [t, 'success']) t_is(f, f_soln, 3, [t, 'f']) t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data']) t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage']) t_is( bus[:, ib_lam ], bus_soln[:, ib_lam ], 3, [t, 'bus lambda']) t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 2, [t, 'bus mu']) t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data']) t_is( gen[:, ig_disp ], gen_soln[:, ig_disp ], 3, [t, 'gen dispatch']) t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu']) t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data']) t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow']) t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu']) ##----- test OPF with angle difference limits ----- ppc = loadcase(join(tdir, 
't_case9_opfv2')) ## remove capability curves ppc['gen'][ix_(arange(1, 3), [PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX])] = zeros((2, 6)) ## get solved AC power flow case from MAT-file soln9_opf_ang = loadmat(join(tdir, 'soln9_opf_ang.mat'), struct_as_record=True) ## defines bus_soln, gen_soln, branch_soln, f_soln bus_soln = soln9_opf_ang['bus_soln'] gen_soln = soln9_opf_ang['gen_soln'] branch_soln = soln9_opf_ang['branch_soln'] f_soln = soln9_opf_ang['f_soln'][0] ## run OPF with angle difference limits t = ''.join([t0, 'w/angle difference limits : ']) r = runopf(ppc, ppopt) f, bus, gen, branch, success = \ r['f'], r['bus'], r['gen'], r['branch'], r['success'] t_ok(success, [t, 'success']) t_is(f, f_soln, 3, [t, 'f']) t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data']) t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage']) t_is( bus[:, ib_lam ], bus_soln[:, ib_lam ], 3, [t, 'bus lambda']) t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 1, [t, 'bus mu']) t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data']) t_is( gen[:, ig_disp ], gen_soln[:, ig_disp ], 3, [t, 'gen dispatch']) t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu']) t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data']) t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow']) t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu']) t_is(branch[:, ibr_angmu ], branch_soln[:, ibr_angmu ], 2, [t, 'branch angle mu']) ##----- test OPF with ignored angle difference limits ----- ## get solved AC power flow case from MAT-file soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True) ## defines bus_soln, gen_soln, branch_soln, f_soln bus_soln = soln9_opf['bus_soln'] gen_soln = soln9_opf['gen_soln'] branch_soln = soln9_opf['branch_soln'] f_soln = soln9_opf['f_soln'][0] ## run OPF with ignored angle difference limits t = ''.join([t0, 'w/ignored angle difference limits : ']) ppopt1 = 
ppoption(ppopt, OPF_IGNORE_ANG_LIM=1) r = runopf(ppc, ppopt1) bus, gen, branch, f, success = \ r['bus'], r['gen'], r['branch'], r['f'], r['success'] ## ang limits are not in this solution data, so let's remove them branch[0, ANGMAX] = 360 branch[8, ANGMIN] = -360 t_ok(success, [t, 'success']) t_is(f, f_soln, 3, [t, 'f']) t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data']) t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage']) t_is( bus[:, ib_lam ], bus_soln[:, ib_lam ], 3, [t, 'bus lambda']) t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 2, [t, 'bus mu']) t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data']) t_is( gen[:, ig_disp ], gen_soln[:, ig_disp ], 3, [t, 'gen dispatch']) t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu']) t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data']) t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow']) t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu']) t_end() if __name__ == '__main__': t_opf_pips_sc(quiet=False)
# -*- coding: utf-8 -*-
# A small falling-blocks puzzle game engine.
#
# The playfield is a WIDTH x (HEIGHT+1) grid of `block` objects.  Each block
# has a color (an index into the `block` class constants) plus two counters
# `tmp`/`tmp2` whose meaning depends on the color.  Per-color behaviour is
# table-driven: `tic_functions[color]` advances the block one tick and
# `apr_functions[color]` renders it.
#
# NOTE(review): this module was recovered from whitespace-mangled source;
# block nesting at the spots marked NOTE(review) below is a best-effort
# reconstruction — confirm against the original layout.
from random import random
from math import cos, pi
from functools import lru_cache, partial

WIDTH = 10           # playfield columns (x runs 1..WIDTH)
HEIGHT = 16          # playfield rows (y runs 0..HEIGHT; HEIGHT is the top)
MAX_TMP = 40         # generic per-block countdown length, in ticks
TMP_1 = 26           # countdown value at which spreading colors propagate
BLOCK_SIZE_H = 15    # half of a rendered block's edge length, in pixels
MAX_ENERGY = 10      # number of cells in the energy bar


class FailedException(BaseException):
    """Raised when the red cursor's infection countdown reaches zero (game over)."""
    pass


class InsufficientEnergyException(BaseException):
    """Raised by use_energy(); `e` is the current energy index, or -1 if none."""
    def __init__(self, e):
        # e: index of the BLUE marker in the energy bar at the time of
        # failure, or -1 when no BLUE cell was found at all.
        self.e = e


# const(x) builds a function that ignores all arguments and returns x.
const = lambda x: lambda *y, **k: x


def tic_orange(self, **kw):
    """ORANGE counts down; at TMP_1 it spreads downward, at 0 it clears."""
    self.tmp = self.tmp - 1
    if self.tmp == TMP_1:
        if not self.is_bottom:
            if self.down.color == block.RED:
                # Infect the cursor: RED's tmp2 records the infecting color.
                self.down.tmp2 = block.ORANGE
            elif self.down.color == block.WHITE:
                self.down.color = block.ORANGE
                self.down.tmp = MAX_TMP
    elif self.tmp == 0:
        self.color = block.WHITE


def tic_blue(self, **kw):
    """BLUE (energy) charges up, then climbs the energy bar.

    While tmp < MAX_TMP it charges one unit every 4 ticks; once full it
    merges into a falling/white cell above (leaving a BLUE_STATIC behind),
    or saturates at MAX_TMP when already at the top.
    """
    if self.tmp < MAX_TMP:
        if kw['time'] % 4 == 0:
            self.tmp = self.tmp + 1
    elif not self.is_top:
        if self.up.color == block.BLUE_FALLING:
            self.up.color = block.BLUE
            self.up.tmp2 = 0
            self.color = block.BLUE_STATIC
        elif self.up.color == block.WHITE:
            self.up.color = block.BLUE
            self.color = block.BLUE_STATIC
    else:
        self.tmp = MAX_TMP


def tic_blue_falling(self, **kw):
    """BLUE_FALLING drains downward toward a resting BLUE column."""
    # NOTE(review): the three ifs are reconstructed as sequential (not
    # nested); the decrement pauses once resting on BLUE with tmp <= tmp2.
    if not (self.down.color == block.BLUE and self.tmp <= self.tmp2):
        self.tmp = self.tmp - 1
    if self.tmp <= 0:
        self.tmp = self.tmp2 = 0
        self.color = block.WHITE
    if self.tmp < TMP_1 and self.down.color == block.WHITE:
        # Propagate the falling column into the empty cell below.
        self.down.color = block.BLUE_FALLING
        self.down.tmp = MAX_TMP
        self.down.tmp2 = self.tmp2


def tic_shining(self, **kw):
    """SHINING flashes for tmp half-ticks, then becomes the color in tmp2."""
    if self.tmp > 0:
        if kw['time'] % 2 == 0:
            self.tmp = self.tmp - 1
    else:
        self.color = self.tmp2


def tic_blue_transiting(self, **kw):
    """BLUE_TRANSITING waits tmp2 half-ticks, then turns into BLUE_FALLING."""
    if self.tmp2 > 0:
        if kw['time'] % 2 == 0:
            self.tmp2 = self.tmp2 - 1
    else:
        self.tmp2 = self.tmp
        self.color = block.BLUE_FALLING


def tic_green(self, **kw):
    """GREEN climbs upward into WHITE cells; clears when the count runs out."""
    self.tmp = self.tmp - 1
    if self.tmp == TMP_1:
        if not self.is_top:
            if self.up.color == block.WHITE:
                self.up.color = block.GREEN
                self.up.tmp = MAX_TMP
        else:
            # NOTE(review): `else` attached to the is_top test — at the top
            # row the green starts fading instead of growing.  Confirm.
            self.color = block.GREEN_FADING
            self.tmp = MAX_TMP
    elif self.tmp == 0:
        self.color = block.WHITE


def tic_green_fading(self, **kw):
    """GREEN_FADING fades out one step every 3 ticks, then clears."""
    if kw['time'] % 3 == 0:
        self.tmp = self.tmp - 1
        if self.tmp == 0:
            self.color = block.WHITE


def tic_yellow(self, **kw):
    """YELLOW climbs upward (blocked by BISTRE); clears when exhausted."""
    self.tmp = self.tmp - 1
    if self.tmp == TMP_1:
        if not (self.is_top or self.up.color == block.BISTRE):
            self.up.color = block.YELLOW
            self.up.tmp = MAX_TMP
    elif self.tmp == 0:
        self.color = block.WHITE


def tic_bistre(self, **kw):
    """BISTRE spreads downward like ORANGE (infects RED, fills any other cell)."""
    self.tmp = self.tmp - 1
    if self.tmp == TMP_1:
        if not self.is_bottom:
            if self.down.color == block.RED:
                self.down.tmp2 = block.BISTRE
            elif True:  # NOTE(review): `elif True` kept verbatim — unlike
                # ORANGE, BISTRE overwrites ANY non-red cell below.
                self.down.color = block.BISTRE
                self.down.tmp = MAX_TMP
    elif self.tmp == 0:
        self.color = block.WHITE


def tic_grey(self, **kw):
    """GREY simply counts down, then clears."""
    self.tmp = self.tmp - 1
    if self.tmp == 0:
        self.color = block.WHITE


def tic_red(self, **kw):
    """RED (the cursor): once infected (tmp2 != WHITE) it counts down to
    game over (FailedException)."""
    if self.tmp2 != block.WHITE:
        self.tmp = self.tmp - 1
        if self.tmp == 0:
            raise FailedException


def tic_brass(self, **kw):
    """BRASS climbs into WHITE cells; otherwise counts down and clears."""
    if not self.is_top and self.up.color == block.WHITE:
        self.up.color = block.BRASS
        self.up.tmp = self.tmp
    else:
        self.tmp = self.tmp - 1
        if self.tmp == 0:
            self.color = block.WHITE


# WHITE and BLUE_STATIC are inert.
tic_white = tic_blue_static = const(None)

# Indexed by the `block` color constants (order must match WHITE..BRASS).
tic_functions = [tic_white, tic_red, tic_orange, tic_blue, tic_blue_static,
                 tic_blue_falling, tic_shining, tic_blue_transiting,
                 tic_green, tic_green_fading, tic_yellow, tic_bistre,
                 tic_grey, tic_brass]


@lru_cache()
def vertically_flipped_block(color, front, tmp):
    """Render a block that appears to flip vertically as tmp counts down.

    Returns (fill_color, rect_fn) where rect_fn(x, y) yields the bounding
    box centred on (x, y).  The visible face alternates between `front`
    and `color` following cos(tmp / MAX_TMP * 2*pi).
    """
    h0 = BLOCK_SIZE_H * cos(tmp / MAX_TMP * 2 * pi)
    if h0 >= 0:
        c = front
        h = h0
    else:
        c = color
        h = -h0
    return c, lambda x, y: (x - BLOCK_SIZE_H, y - h, x + BLOCK_SIZE_H, y + h)


# A full-size, non-animated square of the given color.
static_block = lambda color: (color,
                              lambda x, y: (x - BLOCK_SIZE_H, y - BLOCK_SIZE_H,
                                            x + BLOCK_SIZE_H, y + BLOCK_SIZE_H))


@lru_cache()
def transiting_block(color_0, color_max, tmp):
    """Render a static block whose color blends from color_0 (tmp=0) to
    color_max (tmp=MAX_TMP)."""
    r0, g0, b0 = color_0
    rm, gm, bm = color_max
    m = tmp / MAX_TMP
    l = 1.0 - m
    return static_block((round(l * r0 + m * rm),
                         round(l * g0 + m * gm),
                         round(l * b0 + m * bm)))


# Appearance functions, one per color; each takes the block instance.
apr_red = lambda self: vertically_flipped_block(
    (255, 10, 10), block.flippable_color_list[self.tmp2], self.tmp)
apr_white = const(static_block((128, 128, 128)))
apr_orange = lambda self: vertically_flipped_block(
    (255, 165, 0), (128, 128, 128), self.tmp)
apr_blue = apr_blue_falling = apr_blue_transiting = lambda self: \
    vertically_flipped_block((64, 64, 255), (128, 128, 128), self.tmp / 2)
apr_blue_static = const(static_block((10, 10, 255)))
apr_shining = lambda self: static_block(
    [(255, 255, 255), (64, 64, 64)][self.tmp % 2])
apr_green = lambda self: vertically_flipped_block(
    (0, 102, 0), (128, 128, 128), self.tmp)
apr_green_fading = lambda self: transiting_block(
    (128, 128, 128), (0, 102, 0), self.tmp)
apr_yellow = lambda self: vertically_flipped_block(
    (255, 255, 0), (128, 128, 128), self.tmp)
apr_bistre = lambda self: vertically_flipped_block(
    (61, 43, 31), (128, 128, 128), self.tmp)
apr_grey = lambda self: vertically_flipped_block(
    (64, 64, 64), (128, 128, 128), self.tmp)
apr_brass = lambda self: transiting_block(
    (128, 128, 128), (205, 149, 117), self.tmp * 2)

# Indexed by the `block` color constants (order must match WHITE..BRASS).
apr_functions = [apr_white, apr_red, apr_orange, apr_blue, apr_blue_static,
                 apr_blue_falling, apr_shining, apr_blue_transiting,
                 apr_green, apr_green_fading, apr_yellow, apr_bistre,
                 apr_grey, apr_brass]


class block:
    """One grid cell: a color plus two per-color counters (tmp, tmp2).

    Assigning to `color` also rebinds the cell's tick and render functions
    from the module-level dispatch tables.
    """
    WHITE = 0
    RED = 1
    ORANGE = 2
    BLUE = 3
    BLUE_STATIC = 4
    BLUE_FALLING = 5
    SHINING = 6
    BLUE_TRANSITING = 7
    GREEN = 8
    GREEN_FADING = 9
    YELLOW = 10
    BISTRE = 11
    GREY = 12
    BRASS = 13

    # RGB used for the "back face" of the cursor flip animation, keyed by
    # the infecting color stored in RED's tmp2.
    flippable_color_list = {WHITE: (128, 128, 128),
                            ORANGE: (255, 165, 0),
                            BISTRE: (61, 43, 31)}

    @property
    def color(self):
        return self._color

    @color.setter
    def color(self, value):
        # Changing color swaps in the matching behaviour/appearance.
        self._color = value
        self.tic_function = tic_functions[value]
        self.rep_function = apr_functions[value]

    def __init__(self):
        self.color = block.WHITE
        self.tmp = 0
        self.tmp2 = 0

    def tic(self, t):
        """Advance this cell by one game tick (t is the global clock)."""
        self.tic_function(self, time=t)

    @property
    def appearance(self):
        """(fill_color, rect_fn) pair for rendering this cell."""
        return self.rep_function(self)


class base_game:
    """Level-1 game state: playfield grid, cursor, energy bar, clock."""
    LEVEL = 1

    def __init__(self):
        # blocks[y][x-1]: row 0 is the bottom, row HEIGHT the top.
        self.blocks = [[block() for x in range(1, WIDTH + 1)]
                       for y in range(HEIGHT + 1)]
        self.family = lambda x, y: self.blocks[y][x - 1]
        for y in range(HEIGHT + 1):
            for x in range(1, WIDTH + 1):
                b = self.family(x, y)
                b.is_far_left = x == 1
                b.is_far_right = x == WIDTH
                b.is_bottom = y == 0
                b.is_top = y == HEIGHT
                # Neighbor links; at an edge the flag short-circuits to
                # True, which acts as a "no neighbor" sentinel.
                b.left = b.is_far_left or self.family(x - 1, y)
                b.right = b.is_far_right or self.family(x + 1, y)
                b.up = b.is_top or self.family(x, y + 1)
                b.down = b.is_bottom or self.family(x, y - 1)
        # The player's cursor starts mid-row on the bottom.
        self.cursor = self.family(4, 0)
        self.cursor.color = block.RED
        self.cursor.tmp = MAX_TMP // 2
        # Vertical energy bar: a BLUE marker climbs it as energy charges.
        self.energy_bar = [block() for x in range(MAX_ENERGY)]
        for i in range(MAX_ENERGY):
            b = self.energy_bar[i]
            b.is_bottom = i == 0
            b.is_top = i == MAX_ENERGY - 1
            b.up = b.is_top or self.energy_bar[i + 1]
            b.down = b.is_bottom or self.energy_bar[i - 1]
        self.energy_bar[0].color = block.BLUE
        self.time = 0

    def update(self, scorecallback=const(None)):
        """Advance the whole game one tick; scorecallback(points) on score."""
        self.time = self.time + 1
        for s in self.blocks:
            for t in s:
                t.tic(self.time)
        for u in self.energy_bar:
            u.tic(self.time)
        # Passive score trickle.
        if self.time % (MAX_TMP // 4) == 0:
            scorecallback(1)
        # Periodically seed new hazards along the top row.
        if self.time % MAX_TMP == 0:
            for x in range(1, WIDTH + 1):
                b = self.family(x, HEIGHT)
                if b.color == block.WHITE:
                    if random() < 2 / 7:
                        b.color = block.ORANGE
                        b.tmp = MAX_TMP
                    elif random() < 1 / 20:
                        b.color = block.BISTRE
                        b.tmp = MAX_TMP

    def move_cursor_left(self, failcallback=lambda: print('Cannot move left')):
        """Move the cursor one cell left if uninfected and the cell is free."""
        if not self.cursor.is_far_left \
                and self.cursor.left.color == self.cursor.tmp2 == block.WHITE:
            self.cursor.color = block.WHITE
            self.cursor.tmp = 0
            self.cursor = self.cursor.left
            self.cursor.color = block.RED
            self.cursor.tmp = MAX_TMP // 2
            self.cursor.tmp2 = block.WHITE
        else:
            failcallback()

    def move_cursor_right(self, failcallback=lambda: print('Cannot move right')):
        """Move the cursor one cell right if uninfected and the cell is free."""
        if not self.cursor.is_far_right \
                and self.cursor.right.color == self.cursor.tmp2 == block.WHITE:
            self.cursor.color = block.WHITE
            self.cursor.tmp = 0
            self.cursor = self.cursor.right
            self.cursor.color = block.RED
            self.cursor.tmp = MAX_TMP // 2
            self.cursor.tmp2 = block.WHITE
        else:
            failcallback()

    def use_energy(self, n):
        """Spend n units of energy, animating the bar.

        Raises InsufficientEnergyException(e) when fewer than n units are
        charged (e is the BLUE marker's index, or -1 when there is none).
        """
        try:
            e = [b.color for b in self.energy_bar].index(block.BLUE)
        except ValueError:
            raise InsufficientEnergyException(-1)
        if e >= n:
            # New energy level flashes then becomes the BLUE marker...
            self.energy_bar[e - n].color = block.SHINING
            self.energy_bar[e - n].tmp = 5
            self.energy_bar[e - n].tmp2 = block.BLUE
            # ...the spent cells flash then clear...
            for b in self.energy_bar[e - n + 1:e]:
                b.color = block.SHINING
                b.tmp = 5
                b.tmp2 = block.WHITE
            # ...and the old marker drains away.
            self.energy_bar[e].color = block.BLUE_TRANSITING
            self.energy_bar[e].tmp2 = 5
        else:
            raise InsufficientEnergyException(e)

    def powerup_mono(self, scorecallback=partial(print, "Increase score by"),
                     failcallback=lambda: "energy is insufficient"):
        """Spend 2 energy for GREEN above the cursor; fall back to 1 for BRASS."""
        try:
            self.use_energy(2)
            self.cursor.up.color = block.GREEN
            self.cursor.up.tmp = MAX_TMP
            scorecallback(25)
        except InsufficientEnergyException:
            try:
                self.use_energy(1)
                self.cursor.up.color = block.BRASS
                self.cursor.up.tmp = MAX_TMP // 2
                scorecallback(15)
            except InsufficientEnergyException:
                failcallback()

    def powerup_line(self, scorecallback=partial(print, "Increase score by"),
                     failcallback=lambda: "energy is insufficient"):
        """Spend 3 energy for YELLOW above the cursor."""
        try:
            self.use_energy(3)
            self.cursor.up.color = block.YELLOW
            self.cursor.up.tmp = MAX_TMP
            scorecallback(50)
        except InsufficientEnergyException:
            failcallback()

    def powerup_ninja(self, scorecallback=partial(print, "Increase score by"),
                      failcallback=lambda: "energy is insufficient"):
        """Spend 4 energy to turn all ORANGE to GREY.

        If no charged energy exists at all (e == -1) but the bar is
        mid-animation (>= 4 SHINING cells), a combo fires instead:
        BISTRE downgrades to ORANGE and ORANGE to GREY.
        """
        try:
            self.use_energy(4)
            for y in range(HEIGHT + 1):
                for x in range(1, WIDTH + 1):
                    b = self.family(x, y)
                    if b.color == block.ORANGE:
                        b.color = block.GREY
            scorecallback(70)
        except InsufficientEnergyException as isee:
            if isee.e == -1:
                k = [b.color for b in self.energy_bar].index(block.SHINING)
                if k + 4 < MAX_ENERGY and \
                        self.energy_bar[k + 3].color == block.SHINING:
                    self.energy_bar[k].tmp += MAX_TMP
                    for y in range(HEIGHT + 1):
                        for x in range(1, WIDTH + 1):
                            b = self.family(x, y)
                            if b.color == block.BISTRE:
                                b.color = block.ORANGE
                            elif b.color == block.ORANGE:
                                b.color = block.GREY
            else:
                # NOTE(review): failcallback only fires when some energy
                # exists but < 4 — reconstructed pairing; confirm.
                failcallback()

    def __repr__(self):
        """ASCII dump of the playfield and energy bar (mainly for testing)."""
        # Two-character code per color, indexed by the block constants.
        CLIST = ['W ', 'R ', 'O ', 'B+', 'B=', 'B-', 'S^',
                 'B^', 'G ', 'G-', 'Y ', 'BI', 'GR', 'BR']
        s = "game object at time %d\n" % self.time
        s = s + '-' * 61 + '\n'
        for y in range(HEIGHT + 1):
            s = s + '| '
            for x in range(1, WIDTH + 1):
                b = self.family(x, y)
                s = s + "%s%2d| " % (CLIST[b.color], b.tmp)
            if y < MAX_ENERGY:
                # Energy-bar cell rendered alongside its playfield row.
                b = self.energy_bar[y]
                s = s + ' ' * 4
                s = s + '|%s%2d %2d|' % (CLIST[b.color], b.tmp, b.tmp2)
            s = s + '\n'
        return s


class derived_game(base_game):
    """A game constructed from an existing one: state is carried over and
    every non-WHITE, non-RED cell is downgraded to GREY."""
    def __init__(self, g):
        #for attr in ['blocks', 'family', 'cursor', 'energy_bar', 'time']:
        #    exec("self.%s = g.%s" % (attr, attr))
        # Copy all instance state from the previous level's game.
        for k in vars(g):
            self.__setattr__(k, g.__getattribute__(k))
        for y in range(HEIGHT + 1):
            for x in range(1, WIDTH + 1):
                b = self.family(x, y)
                if not (b.color == block.WHITE or b.color == block.RED):
                    b.color = block.GREY
                    b.tmp2 = 0


class lv2_game(derived_game):
    """Level 2: faster scoring, denser hazard spawns."""
    LEVEL = 2

    def update(self, scorecallback=const(None)):
        self.time = self.time + 1
        for s in self.blocks:
            for t in s:
                t.tic(self.time)
        for u in self.energy_bar:
            u.tic(self.time)
        # Score trickles faster than in base_game (every MAX_TMP//3 ticks).
        if self.time % (MAX_TMP // 3) == 0:
            scorecallback(1)
        if self.time % MAX_TMP == 0:
            for x in range(1, WIDTH + 1):
                b = self.family(x, HEIGHT)
                if b.color == block.WHITE:
                    # Higher spawn rates than base_game (3/7 and 1/15).
                    if random() < 3 / 7:
                        b.color = block.ORANGE
                        b.tmp = MAX_TMP
                    elif random() < 1 / 15:
                        b.color = block.BISTRE
                        b.tmp = MAX_TMP


class lv3_game(derived_game):
    """Level 3: base rules at double speed (two base ticks per update)."""
    LEVEL = 3

    def update(self, scorecallback=const(None)):
        base_game.update(self, scorecallback)
        base_game.update(self, scorecallback)


class lv4_game(derived_game):
    """Level 4: level-2 rules at double speed."""
    LEVEL = 4

    def update(self, scorecallback=const(None)):
        lv2_game.update(self, scorecallback)
        lv2_game.update(self, scorecallback)


if __name__ == '__main__':
    # Interactive-console helpers: each returns the game (via const) so the
    # REPL echoes the board after every command.
    g = base_game()
    step = lambda n=1: const(g)([g.update() for i in range(n)])
    ml = lambda: const(g)(g.move_cursor_left())
    mr = lambda: const(g)(g.move_cursor_right())
    use = lambda n: const(g)(g.use_energy(n))
""" lineinfo.py -- dealing with Doom linedef trigger types. Provides functions to create a human-readable description code from a trigger number, and the inverse operation. Guide to Trigger Description Codes (R): Example: "FLOOR SR UP SLOW CRUSH LNC-8" Categories: DOOR - Doors (regular and locked) FLOOR - Floor movers CEIL - Ceiling movers PLAT - Platforms and lifts CRUSHER - Crushers STAIR - Stair builders ELEVATOR - Boom elevators LIGHT - Light changers EXIT - Exits TELEPORT - Teleports DONUT - Donuts (lower pillar, raise surrounding sector) TRANSFER - Transfer properties (Boom) SCROLL - Scroll lines and sectors (Boom) Trigger types: P1 - Push(door) trigger, works once PR - Push(door) trigger, works repeatedly S1 - Switch, works once SR - Switch, works repeatedly W1 - Walk across, works once WR - Walk across, works repeatedly G1 - Shoot, works once GR - Shoot, works repeatedly Door locks: YEL - Yellow key lock RED - Red key lock BLU - Blue key lock Door types: OWC - Open, Wait, Close CWO - Close, Wait, Open OSO - Open, Stay Open CSC - Close, Stay Closed Motion speed SLOW - Slow NORM - Normal FAST - Fast TURB - Turbo INST - Instant Delay times 3SEC - 3 seconds 4SEC - 4 seconds 30SEC - 30 seconds Sector property changers: CPYTEX - Copy Texture CPYTEX+DELTYPE - Copy Texture, Reset type CPYTEX+TYPE - Copy Texture and Type Directions: DOWN - Down UP - Up NOMOVE - Stay (only change properties) Miscellaneous: SECRET - A secret exit MONSTER - Monsters can activate the trigger LINE - Line teleporters REVERSE - Line teleporter, reversed SILENT - Make crushers or teleporters silent CRUSH - Enable crushing (for CEILs and FLOORs, not to be confused with CRUSHERs) Destinations/platforms: LNF - Lowest Neighbor Floor LNC - Lowest Neighbor Ceiling HNF - Highest Neighbor Floor HNC - Highest Neighbor Ceiling NNF - Next Neighbor Floor NNC - Next Neighbor Ceiling HNF+8 - 8 above Highest neighbor Floor LNC+8 - 8 under Lowest neighbor Ceiling F+8 - 8 above floor 8 - 8 units Absolute 
(STAIRs only) 16 - 16 units Absolute (STAIRs only) 24 - 24 units Absolute 32 - 32 units Absolute 512 - 512 units absolute SLT - Shortest Lower Texture SUT - Shortest Upper Texture NLF - Next Lowest neighbor Floor NLC - Next Lowest neighbor Ceiling NHF - Next Highest neighbor Floor CURRF - Current Floor (ELEVATORs) FLR - Floor CL - Ceiling NAF - Next adjacent floor PERP - Perpetual STOP - Stop ongoing motion Models: TRIG - Use trigger sector as model NUM - Lookup adjacent model numerically Lighting: 35 - 35 units 255 - 255 units MAXN - Maximum Neighbor MINN - Minimum Neighbor BLINK - Blinking Transfers (check boomref.txt for more info): FLIGHT - Transfer floor light level CLIGHT - Transfer ceiling light level TRANSLUCENCY - Transfer line translucency HEIGHTS - The famous 242! FRICTION - Transfer friction WIND - Transfer current POINTFORCE - Transfer force point (?) Scrollers (check boomref.txt for more info): CARRY - Carry objects (conveyor) WRTSECTOR - With respect to 1st side's sector ACCEL - Accelerate scrolling RIGHT - Right direction LEFT - Left direction WALL - Scroll wall SYNCED - Sync scrolling to sector OFFSETS - Scroll by offsets """ from fnmatch import fnmatchcase # Define description codes for the standard triggers desc2num = \ { "": 0, "NO ACTION":0, # Doors "DOOR PR SLOW OWC 4SEC MONSTER":1, "DOOR PR FAST OWC 4SEC":117, "DOOR SR SLOW OWC 4SEC":63, "DOOR SR FAST OWC 4SEC":114, "DOOR S1 SLOW OWC 4SEC":29, "DOOR S1 FAST OWC 4SEC":111, "DOOR WR SLOW OWC 4SEC":90, "DOOR WR FAST OWC 4SEC":105, "DOOR W1 SLOW OWC 4SEC":4, "DOOR W1 FAST OWC 4SEC":108, "DOOR P1 SLOW OSO":31, "DOOR P1 FAST OSO":118, "DOOR SR SLOW OSO":61, "DOOR SR FAST OSO":114, "DOOR S1 SLOW OSO":103, "DOOR S1 FAST OSO":112, "DOOR WR SLOW OSO":86, "DOOR WR FAST OSO":106, "DOOR W1 SLOW OSO":2, "DOOR W1 FAST OSO":109, "DOOR GR FAST OSO":46, "DOOR SR SLOW CSC":42, "DOOR SR FAST CSC":116, "DOOR S1 SLOW CSC":50, "DOOR S1 FAST CSC":113, "DOOR WR SLOW CSC":75, "DOOR WR FAST CSC":107, "DOOR W1 SLOW 
CSC":3, "DOOR W1 FAST CSC":110, "DOOR SR SLOW CWO 30SEC":196, "DOOR S1 SLOW CWO 30SEC":175, "DOOR WR SLOW CWO 30SEC":76, "DOOR W1 SLOW CWO 30SEC":16, "DOOR PR SLOW OWC 4SEC BLU":26, "DOOR PR SLOW OWC 4SEC RED":28, "DOOR PR SLOW OWC 4SEC YEL":27, "DOOR P1 SLOW OSO BLU":32, "DOOR P1 SLOW OSO RED":33, "DOOR P1 SLOW OSO YEL":34, "DOOR SR FAST OSO BLU":99, "DOOR SR FAST OSO RED":134, "DOOR SR FAST OSO YEL":136, "DOOR S1 FAST OSO BLU":133, "DOOR S1 FAST OSO RED":135, "DOOR S1 FAST OSO YEL":137, # Floors "FLOOR SR DOWN SLOW LNF":60, "FLOOR S1 DOWN SLOW LNF":23, "FLOOR WR DOWN SLOW LNF":82, "FLOOR W1 DOWN SLOW LNF":38, "FLOOR SR DOWN SLOW LNF CPYTEX+TYPE NUM":177, "FLOOR S1 DOWN SLOW LNF CPYTEX+TYPE NUM":159, "FLOOR WR DOWN SLOW LNF CPYTEX+TYPE NUM":84, "FLOOR W1 DOWN SLOW LNF CPYTEX+TYPE NUM":37, "FLOOR SR UP SLOW NNF":69, "FLOOR S1 UP SLOW NNF":18, "FLOOR WR UP SLOW NNF":128, "FLOOR W1 UP SLOW NNF":119, "FLOOR SR UP FAST NNF":132, "FLOOR S1 UP FAST NNF":131, "FLOOR WR UP FAST NNF":129, "FLOOR W1 UP FAST NNF":130, "FLOOR SR DOWN SLOW NNF":222, "FLOOR S1 DOWN SLOW NNF":221, "FLOOR WR DOWN SLOW NNF":220, "FLOOR W1 DOWN SLOW NNF":219, "FLOOR SR UP SLOW LNC":64, "FLOOR S1 UP SLOW LNC":101, "FLOOR WR UP SLOW LNC":91, "FLOOR W1 UP SLOW LNC":5, "FLOOR G1 UP SLOW LNC":24, "FLOOR SR UP SLOW LNC-8 CRUSH":65, "FLOOR S1 UP SLOW LNC-8 CRUSH":55, "FLOOR WR UP SLOW LNC-8 CRUSH":94, "FLOOR W1 UP SLOW LNC-8 CRUSH":56, "FLOOR SR DOWN SLOW HNF":45, "FLOOR S1 DOWN SLOW HNF":102, "FLOOR WR DOWN SLOW HNF":83, "FLOOR W1 DOWN SLOW HNF":19, "FLOOR SR DOWN FAST HNF+8":70, "FLOOR S1 DOWN FAST HNF+8":71, "FLOOR WR DOWN FAST HNF+8":98, "FLOOR W1 DOWN FAST HNF+8":36, "FLOOR SR UP SLOW 24":180, "FLOOR S1 UP SLOW 24":161, "FLOOR WR UP SLOW 24":92, "FLOOR W1 UP SLOW 24":58, "FLOOR SR UP SLOW 24 CPYTEX+TYPE TRIG":179, "FLOOR S1 UP SLOW 24 CPYTEX+TYPE TRIG":160, "FLOOR WR UP SLOW 24 CPYTEX+TYPE TRIG":93, "FLOOR W1 UP SLOW 24 CPYTEX+TYPE TRIG":59, "FLOOR SR UP SLOW SLT":176, "FLOOR S1 UP SLOW SLT":158, 
"FLOOR WR UP SLOW SLT":96, "FLOOR W1 UP SLOW SLT":30, "FLOOR SR UP SLOW 512":178, "FLOOR S1 UP SLOW 512":140, "FLOOR WR UP SLOW 512":147, "FLOOR W1 UP SLOW 512":142, "FLOOR SR NOMOVE CPYTEX+TYPE SLT TRIG":190, "FLOOR S1 NOMOVE CPYTEX+TYPE SLT TRIG":189, "FLOOR WR NOMOVE CPYTEX+TYPE SLT TRIG":154, "FLOOR W1 NOMOVE CPYTEX+TYPE SLT TRIG":153, "FLOOR SR NOMOVE CPYTEX+TYPE SLT NUM":78, "FLOOR S1 NOMOVE CPYTEX+TYPE SLT NUM":241, "FLOOR WR NOMOVE CPYTEX+TYPE SLT NUM":240, "FLOOR W1 NOMOVE CPYTEX+TYPE SLT NUM":239, # Ceilings "CEIL SR DOWN FAST FLR":43, "CEIL S1 DOWN FAST FLR":41, "CEIL WR DOWN FAST FLR":152, "CEIL W1 DOWN FAST FLR":145, "CEIL SR UP SLOW HNC":186, "CEIL S1 UP SLOW HNC":166, "CEIL WR UP SLOW HNC":151, "CEIL W1 UP SLOW HNC":40, "CEIL SR DOWN SLOW F+8":187, "CEIL S1 DOWN SLOW F+8":167, "CEIL WR DOWN SLOW F+8":72, "CEIL W1 DOWN SLOW F+8":44, "CEIL SR DOWN SLOW LNC":205, "CEIL S1 DOWN SLOW LNC":203, "CEIL WR DOWN SLOW LNC":201, "CEIL W1 DOWN SLOW LNC":199, "CEIL SR DOWN SLOW HNF":205, "CEIL S1 DOWN SLOW HNF":204, "CEIL WR DOWN SLOW HNF":202, "CEIL W1 DOWN SLOW HNF":200, # Platforms and lifts "PLAT SR SLOW CPYTEX TRIG 24":66, "PLAT S1 SLOW CPYTEX TRIG 24":15, "PLAT WR SLOW CPYTEX TRIG 24":148, "PLAT W1 SLOW CPYTEX TRIG 24":143, "PLAT SR SLOW CPYTEX TRIG 32":67, "PLAT S1 SLOW CPYTEX TRIG 32":14, "PLAT WR SLOW CPYTEX TRIG 32":149, "PLAT W1 SLOW CPYTEX TRIG 32":144, "PLAT SR SLOW CPYTEX+DELTYPE TRIG NAF":68, "PLAT S1 SLOW CPYTEX+DELTYPE TRIG NAF":20, "PLAT WR SLOW CPYTEX+DELTYPE TRIG NAF":95, "PLAT W1 SLOW CPYTEX+DELTYPE TRIG NAF":22, "PLAT G1 SLOW CPYTEX+DELTYPE TRIG NAF":47, "PLAT SR SLOW 3SEC PERP":181, "PLAT S1 SLOW 3SEC PERP":162, "PLAT WR SLOW 3SEC PERP":87, "PLAT W1 SLOW 3SEC PERP":53, "PLAT SR STOP":182, "PLAT S1 STOP":163, "PLAT WR STOP":89, "PLAT W1 STOP":54, "PLAT SR SLOW 3SEC LNF":62, "PLAT S1 SLOW 3SEC LNF":21, "PLAT WR SLOW 3SEC LNF":88, "PLAT W1 SLOW 3SEC LNF":10, "PLAT SR FAST 3SEC LNF":123, "PLAT S1 FAST 3SEC LNF":122, "PLAT WR FAST 3SEC LNF":120, 
"PLAT W1 FAST 3SEC LNF":121, "PLAT SR INST CL":211, "PLAT WR INST CL":212, # Crushers "CRUSHER SR SLOW":184, "CRUSHER S1 SLOW":49, "CRUSHER WR SLOW":73, "CRUSHER W1 SLOW":25, "CRUSHER SR FAST":183, "CRUSHER S1 FAST":164, "CRUSHER WR FAST":77, "CRUSHER W1 FAST":6, "CRUSHER SR SLOW SILENT":185, "CRUSHER S1 SLOW SILENT":165, "CRUSHER WR SLOW SILENT":150, "CRUSHER W1 SLOW SILENT":141, "CRUSHER SR STOP":188, "CRUSHER S1 STOP":168, "CRUSHER WR STOP":74, "CRUSHER W1 STOP":57, # Stairs "STAIR SR UP SLOW 8":258, "STAIR S1 UP SLOW 8":7, "STAIR WR UP SLOW 8":256, "STAIR W1 UP SLOW 8":8, "STAIR SR UP FAST 16":259, "STAIR S1 UP FAST 16":127, "STAIR WR UP FAST 16":257, "STAIR W1 UP FAST 16":100, # Boom elevators "ELEVATOR SR FAST NHF":230, "ELEVATOR S1 FAST NHF":229, "ELEVATOR WR FAST NHF":228, "ELEVATOR W1 FAST NHF":227, "ELEVATOR SR FAST NHF":234, "ELEVATOR S1 FAST NLF":233, "ELEVATOR WR FAST NLF":232, "ELEVATOR W1 FAST NLF":231, "ELEVATOR SR FAST CURRF":238, "ELEVATOR S1 FAST CURRF":237, "ELEVATOR WR FAST CURRF":236, "ELEVATOR W1 FAST CURRF":235, # Lighting "LIGHT SR 35":139, "LIGHT S1 35":170, "LIGHT WR 35":79, "LIGHT W1 35":35, "LIGHT SR 255":138, "LIGHT S1 255":171, "LIGHT WR 255":81, "LIGHT W1 255":13, "LIGHT SR MAXN":192, "LIGHT S1 MAXN":169, "LIGHT WR MAXN":80, "LIGHT W1 MAXN":12, "LIGHT SR MINN":194, "LIGHT S1 MINN":173, "LIGHT WR MINN":157, "LIGHT W1 MINN":104, "LIGHT SR BLINK":193, "LIGHT S1 BLINK":172, "LIGHT WR BLINK":156, "LIGHT W1 BLINK":17, # Exits "EXIT S1":11, "EXIT W1":52, "EXIT G1":197, "EXIT S1 SECRET":51, "EXIT W1 SECRET":124, "EXIT G1 SECRET":198, # Teleports "TELEPORT SR":195, "TELEPORT S1":174, "TELEPORT WR":97, "TELEPORT W1":39, "TELEPORT WR MONSTER":126, "TELEPORT W1 MONSTER":125, "TELEPORT SR MONSTER":269, "TELEPORT S1 MONSTER":268, "TELEPORT SR SILENT":210, "TELEPORT S1 SILENT":209, "TELEPORT WR SILENT":208, "TELEPORT W1 SILENT":207, "TELEPORT WR SILENT LINE":244, "TELEPORT W1 SILENT LINE":243, "TELEPORT WR SILENT LINE REVERSE":263, "TELEPORT W1 
SILENT LINE REVERSE":262, "TELEPORT WR SILENT LINE MONSTER":267, "TELEPORT W1 SILENT LINE MONSTER":266, "TELEPORT WR SILENT LINE REVERSE MONSTER":265, "TELEPORT W1 SILENT LINE REVERSE MONSTER":264, # Donuts "DONUT SR":191, "DONUT S1":9, "DONUT WR":155, "DONUT W1":146, # Boom property transfer "TRANSFER FLIGHT":213, "TRANSFER CLIGHT":261, "TRANSFER TRANSLUCENCY":260, "TRANSFER HEIGHTS":242, "TRANSFER FRICTION":223, "TRANSFER WIND":224, "TRANSFER CURRENT":225, "TRANSFER POINTFORCE":226, # Scrollers "SCROLL CL":250, "SCROLL FLR":251, "SCROLL CARRY":252, "SCROLL FLR+CARRY":253, "SCROLL WALL SYNCED":254, "SCROLL WALL OFFSETS":255, "SCROLL WALL RIGHT":85, "SCROLL WALL LEFT":48, "SCROLL CL WRTSECTOR":245, "SCROLL FLR WRTSECTOR":246, "SCROLL CARRY WRTSECTOR":247, "SCROLL F+CARRY WRTSECTOR":248, "SCROLL WALL WRTSECTOR":249, "SCROLL CL ACCEL":214, "SCROLL FLR ACCEL":215, "SCROLL CARRY ACCEL":216, "SCROLL FLR+CARRY ACCEL":217, "SCROLL WALL ACCEL":218 } num2desc = {} for d, n in desc2num.items(): num2desc[n] = d del(d) del(n) trigcompat = \ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] def check_compat(num): """Check the compatibility for a trigger number""" if 8192 <= num < 32768: return "BOOM GENERALIZED" try: return 
["UNKNOWN", "DOOM19", "BOOM EXTENDED"][trigcompat[num]] except: return "UNKNOWN" def decode(n): """Generate a description code for a number.""" d = [] if n < 8192: if n in num2desc: return num2desc[n] return "UNKNOWN" # Boom generalized elif 0x2F80 <= n < 0x3000: n -= 0x2F80 d += ["CRUSHER"] d += [("W1","WR","S1","SR","G1","GR","P1","PR") [n&0x0007]] d += [("SLOW","NORMAL","FAST","TURBO") [(n&0x0018)>>3]] d += [("MONSTER","") [(n&0x0020)>>5]] d += [("SILENT","") [(n&0x00c0)>>6]] elif 0x3000 <= n < 0x3400: n -= 0x3000 d += ["STAIR"] d += [("W1","WR","S1","SR","G1","GR","P1","PR") [n&0x0007]] d += [("SLOW","NORMAL","FAST","TURBO") [(n&0x0018)>>3]] d += [("","MONSTER") [(n&0x0020)>>5]] d += [("4","8","16","24") [(n&0x00c0)>>6]] d += [("DOWN","UP") [(n&0x0100)>>8]] d += [("", "IGNTXT") [(n&0x0200)>>9]] elif 0x3400 <= n < 0x3800: n -= 0x3400 d += ["PLATFORM"] d += [("W1","WR","S1","SR","G1","GR","P1","PR") [n&0x0007]] d += [("SLOW","NORMAL","FAST","TURBO") [(n&0x0018)>>3]] d += [("MONSTER","") [(n&0x0020)>>5]] d += [("1","3","5","10") [(n&0x00c0)>>6]] d += [("LNF","NNF","LNC","PERP") [(n&0x0300)>>8]] elif 0x3800 <= n < 0x3c00: n -= 0x3800 d += ["DOOR"] d += [("W1","WR","S1","SR","G1","GR","P1","PR") [n&0x0007]] d += [("SLOW","NORMAL","FAST","TURBO") [(n&0x0018)>>3]] d += [("OWC","OSO") [(n&0x0020)>>5]] d += [("ANY","RED","YELLOW","BLUE","RED", "BLUE","YELLOW","ALL") [(n&0x01c0)>>6]] d += [("3KEYS","6KEYS") [(n&0x0200)>>9]] elif 0x3c00 <= n < 0x4000: n -= 0x3c00 d += ["DOOR"] d += [("W1","WR","S1","SR","G1","GR","P1","PR") [n&0x0007]] d += [("SLOW","NORMAL","FAST","TURBO") [(n&0x0018)>>3]] d += [("OWC","OSO","CWO","CSC") [(n&0x0060)>>5]] d += [("MONSTER","") [(n&0x0080)>>7]] d += [("1SECS","4SECS","9SECS","30SECS") [(n&0x0300)>>8]] elif 0x4000 <= n < 0x6000: n -= 0x4000 d += ["CEIL"] d += [("W1","WR","S1","SR","G1","GR","P1","PR") [n&0x0007]] d += [("SLOW","NORMAL","FAST","TURBO") [(n&0x0018)>>3]] d += [("TRIG","NUM") [(n&0x0020)>>5]] d += [("DOWN","UP") [(n&0x0040)>>6]] 
d += [("HNC","LNC","NNC","HNF","FLR", "SUT","24","32") [(n&0x0380)>>7]] d += [("","CPYTEX+DELTYPE","CPYTEX","CHGTYPE") [(n&0x0c00)>>10]] d += [("CRUSH","") [(n&0x1000)>>12]] elif 0x6000 <= n < 0x8000: n -= 0x6000 d += ["FLOOR"] d += [("W1","WR","S1","SR","G1","GR","P1","PR") [n&0x0007]] d += [("SLOW","NORMAL","FAST","TURBO") [(n&0x0018)>>3]] d += [("TRIG","NUM") [(n&0x0020)>>5]] d += [("DOWN","UP") [(n&0x0040)>>6]] d += [("HNF","LNF","NNF","LNC","CL", "SLT","24","32") [(n&0x0380)>>7]] d += [("","CPYTEX+DELTYPE","CPYTEX","CHGTYPE") [(n&0x0c00)>>10]] d += [("CRUSH","") [(n&0x1000)>>12]] # Bit of a hack, but works return (" ".join(d)).replace(" "," ").rstrip(" ") def encode_std(desc): """Encode an exact description of a trigger into its corresponding number. For inexact descriptions, use find_std.""" try: return desc2num[desc.upper()] except: raise Exception("Description not recognized") def encode_gen(desc): """Encode a generalized (Boom) trigger description to a trigger number. Invalid or incompatible terms get converted to the default value.""" desc = desc.upper() num = 0 def pk(seq, shift): for i in range(len(seq)): if seq[i] in desc: return i << shift return 0 num |= pk(("W1","WR","S1","SR","G1","GR","P1","PR"), 0) num |= pk(("SLOW","NORMAL","FAST","TURBO"), 3) if ("FLOOR" in desc) or ("CEIL" in desc): num |= pk(("TRIG","NUM"), 5) num |= pk(("DOWN","UP"), 6) num |= pk(("xx","CPYTEX+DELTYPE","CPYTEX","CHGTYPE"), 10) num |= pk(("CRUSH",), 12) if "FLOOR" in desc: num |= pk(("HNF","LNF","NNF","LNC","CL","SLT","24","32"), 7) num += 0x6000 else: num |= pk(("HNC","LNC","NNC","HNF","FLR","SUT","24","32"), 7) num += 0x4000 elif "CRUSHER" in desc: num |= pk(("MONSTER",), 5) num |= pk(("SILENT",), 6) num += 0x2F80 elif "STAIR" in desc: num |= pk(("xx","MONSTER"), 5) num |= pk(("4","8","16","24"), 6) num |= pk(("DOWN","UP"), 8) num |= pk(("xx","IGNTXT"), 9) num += 0x3000 elif "PLATFORM" in desc: num |= pk(("MONSTER",), 5) num |= pk(("1","3","5","10"), 6) num |= 
pk(("LNF","NNF","LNC","PERP"), 8) num += 0x3400 elif "DOOR" in desc: num |= pk(("SLOW","NORMAL","FAST","TURBO"), 3) if ("BLU" in desc) or ("YEL" in desc) or ("RED" in desc) or\ ("ALL" in desc) or ("ANY" in desc): num |= pk(("OWC","OSO"), 5) num |= pk(("ANY","RED","YELLOW","BLUE","RED","BLUE","YELLOW","ALL"), 6) num |= pk(("3KEYS","6KEYS"), 9) num += 0x3800 else: num |= pk(("OWC","OSO","CWO","CSC"), 5) num |= pk(("MONSTER",), 7) num |= pk(("1SECS","4SECS","9SECS","30SECS"), 8) num += 0x3c00 else: raise LookupError("Insufficient information provided") return num def find_std(desc): """Search the standard (non-generalized) triggers. A list of matches is returned. All terms must match. Wildcards are allowed. Example: find_std("CEIL UP S?") should return: ['CEIL S1 UP SLOW HNC', 'CEIL SR UP SLOW HNC']""" desc = desc.upper() terms = desc.split() matches = [] for dsc in num2desc.values(): d = dsc.split() matchedterms = 0 for term in terms: for key in d: if fnmatchcase(key, term): matchedterms += 1 if matchedterms == len(terms): matches.append(dsc) return matches __all__ = [find_std, encode_std, encode_gen, decode, check_compat]
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base scenario test case for sahara-tests.

Drives a full Sahara (OpenStack Data Processing) scenario: client setup,
cluster provisioning, EDP job execution, scaling, and verification, while
collecting per-check results for a final report.
"""

from __future__ import print_function
import functools
import logging
import os
import sys
import time
import traceback

import fixtures
from oslo_utils import timeutils
import prettytable
import six
from tempest.lib import base
from tempest.lib.common import ssh as connection
from tempest.lib import exceptions as exc

from sahara_tests.scenario import clients
from sahara_tests.scenario import timeouts
from sahara_tests.scenario import utils
from sahara_tests.utils import crypto as ssh
from sahara_tests.utils import url as utils_url

# Silence swiftclient's own logging; its errors are surfaced via our checks.
logger = logging.getLogger('swiftclient')
logger.setLevel(logging.CRITICAL)

# Per-check result states used in self._results entries.
CHECK_OK_STATUS = "OK"
CHECK_FAILED_STATUS = "FAILED"
# Cluster states reported by Sahara that the pollers react to.
CLUSTER_STATUS_ACTIVE = "Active"
CLUSTER_STATUS_ERROR = "Error"
# Terminal health-verification states (anything else means "still running").
HEALTH_CHECKS = ["RED", "YELLOW", "GREEN"]


def track_result(check_name, exit_with_error=True):
    """Decorator: record outcome/duration of a check method in self._results.

    :param check_name: human-readable name shown in the result table.
    :param exit_with_error: when True (default) re-raise the failure so the
        test run stops; when False only record it and continue.
    """
    def decorator(fct):
        @functools.wraps(fct)
        def wrapper(self, *args, **kwargs):
            started_at = timeutils.utcnow()
            test_info = {
                'check_name': check_name,
                'status': CHECK_OK_STATUS,
                'start_time': started_at,
                'duration': None,
                'traceback': None,
                'exception_time': None
            }
            # Appended before the call so a hard failure still shows up.
            self._results.append(test_info)
            try:
                return fct(self, *args, **kwargs)
            except Exception:
                test_info['exception_time'] = timeutils.utcnow().strftime(
                    '%Y%m%d_%H%M%S')
                test_info['status'] = CHECK_FAILED_STATUS
                test_info['traceback'] = traceback.format_exception(
                    *sys.exc_info())
                if exit_with_error:
                    raise
            finally:
                # Duration is recorded on both success and failure paths.
                test_time = timeutils.utcnow() - started_at
                test_info['duration'] = test_time.seconds
        return wrapper
    return decorator


class BaseTestCase(base.BaseTestCase):
    """Scenario test harness around the OpenStack clients.

    Subclasses/runners are expected to fill in ``network``, ``credentials``
    and ``testcase`` (a parsed scenario definition dict) before setUp runs.
    """

    @classmethod
    def setUpClass(cls):
        # Defaults; the scenario runner overrides these class attributes.
        super(BaseTestCase, cls).setUpClass()
        cls.network = None
        cls.credentials = None
        cls.testcase = None
        cls._results = []
        cls.report = False
        cls.results_dir = '.'
        cls.default_templ_dir = '.'
        cls.use_api_v2 = False

    def setUp(self):
        """Initialize clients, register the image and prepare an SSH keypair."""
        super(BaseTestCase, self).setUp()
        self._init_clients()
        timeouts.Defaults.init_defaults(self.testcase)
        self.testcase['ssh_username'] = self.sahara.register_image(
            self.glance.get_image_id(self.testcase['image']),
            self.testcase).username
        self.key = self.testcase.get('key_name')
        if self.key is None:
            # No pre-existing keypair requested: generate one and upload it.
            self.private_key, self.public_key = ssh.generate_key_pair()
            self.key_name = self.__create_keypair()
        # save the private key if retain_resources is specified
        # (useful for debugging purposes)
        # NOTE(review): if retain_resources is set while key_name IS
        # provided, self.private_key/self.key_name were never assigned and
        # this block raises AttributeError -- confirm intended usage.
        if self.testcase['retain_resources'] or self.key is None:
            private_key_file_name = os.path.join(self.results_dir,
                                                 self.key_name + '.key')
            with open(private_key_file_name, 'w+') as private_key_file:
                private_key_file.write(self.private_key)
            os.chmod(private_key_file_name, 0o600)
        # APIv1 uses the legacy 'hadoop_version' field name.
        self.plugin_version_option = 'plugin_version'
        if not self.use_api_v2:
            self.plugin_version_option = 'hadoop_version'
        self.plugin_opts = {
            'plugin_name': self.testcase['plugin_name'],
            self.plugin_version_option: self.testcase['plugin_version']
        }
        self.cinder = True
        self.proxy = False

    def _init_clients(self):
        """Build sahara/nova/neutron/swift/glance (and optional boto) clients."""
        username = self.credentials['os_username']
        password = self.credentials['os_password']
        tenant_name = self.credentials['os_tenant']
        auth_url = self.credentials['os_auth_url']
        sahara_service_type = self.credentials['sahara_service_type']
        sahara_url = self.credentials['sahara_url']
        auth_version = '3.0' if 'v3' in auth_url else '2.0'
        session = clients.get_session(auth_url, username, password,
                                      tenant_name,
                                      self.credentials.get('ssl_verify',
                                                           False),
                                      self._get_file_with_defaults(
                                          self.credentials.get('ssl_cert')))
        api_version = '2' if self.use_api_v2 else '1.1'
        self.sahara = clients.SaharaClient(session=session,
                                           service_type=sahara_service_type,
                                           sahara_url=sahara_url,
                                           api_version=api_version)
        self.nova = clients.NovaClient(session=session)
        self.neutron = clients.NeutronClient(session=session)
        # swiftclient doesn't support keystone sessions
        self.swift = clients.SwiftClient(
            auth_version=auth_version,
            authurl=auth_url,
            user=username,
            key=password,
            insecure=not self.credentials.get('ssl_verify', False),
            cacert=self.credentials.get('ssl_cert'),
            tenant_name=tenant_name)
        self.glance = clients.GlanceClient(session=session)
        # boto is not an OpenStack client, but we can handle it as well
        self.boto = None
        if self.credentials.get("s3_endpoint", None):
            self.boto = clients.BotoClient(
                endpoint=self.credentials["s3_endpoint"],
                accesskey=self.credentials["s3_accesskey"],
                secretkey=self.credentials["s3_secretkey"])

    def create_cluster(self):
        """Create (or reuse) a cluster and run the post-provision checks."""
        self.cluster_id = self.sahara.get_cluster_id(
            self.testcase.get('existing_cluster'))
        self.ng_id_map = {}
        if self.cluster_id is None:
            # Fresh cluster: build node group templates -> cluster template
            # -> cluster.
            self.ng_id_map = self._create_node_group_templates()
            cl_tmpl_id = self._create_cluster_template()
            self.cluster_id = self._create_cluster(cl_tmpl_id)
        elif self.key is None:
            # Reusing an existing cluster: skip cinder checks.
            self.cinder = False
        self._poll_cluster_status_tracked(self.cluster_id)
        cluster = self.sahara.get_cluster(self.cluster_id,
                                          show_progress=True)
        self._get_proxy(cluster)
        self.check_cinder()
        if self.check_feature_available("provision_progress"):
            self._check_event_logs(cluster)

    def _get_proxy(self, cluster):
        # Remember the management IP of a proxy-gateway instance (if any)
        # so later SSH commands are tunnelled through it.
        for ng in cluster.node_groups:
            if ng['is_proxy_gateway']:
                for instance in ng['instances']:
                    if instance['management_ip'] != (
                            instance['internal_ip']):
                        self.proxy = instance['management_ip']

    @track_result("Check transient")
    def check_transient(self):
        """Wait until a transient cluster removes itself."""
        with fixtures.Timeout(
                timeouts.Defaults.instance.timeout_check_transient,
                gentle=True):
            while True:
                if self.sahara.is_resource_deleted(
                        self.sahara.get_cluster_status, self.cluster_id):
                    break
                time.sleep(5)

    def _inject_datasources_data(self, arg, input_url, output_url):
        # Substitute {input_datasource}/{output_datasource} placeholders in
        # a job argument string.
        return arg.format(
            input_datasource=input_url, output_datasource=output_url)

    def _put_io_data_to_configs(self, configs, input_id, output_id):
        """Resolve datasource ids to URLs inside the job args."""
        input_url, output_url = None, None
        if input_id is not None:
            input_url = self.sahara.get_datasource(
                data_source_id=input_id).url
        if output_id is not None:
            output_url = self.sahara.get_datasource(
                data_source_id=output_id).url
        pl = lambda x: self._inject_datasources_data(x, input_url,
                                                     output_url)
        args = list(map(pl, configs.get('args', [])))
        configs['args'] = args
        return configs

    def _prepare_job_running(self, job):
        """Create everything one EDP job needs; return _run_job arguments."""
        input_id, output_id = self._create_datasources(job)
        main_libs, additional_libs = self._create_job_binaries(job)
        job_id = self._create_job(job['type'], main_libs, additional_libs)
        configs = self._parse_job_configs(job)
        configs = self._put_io_data_to_configs(
            configs, input_id, output_id)
        return [job_id, input_id, output_id, configs]

    @track_result("Check EDP jobs", False)
    def check_run_jobs(self):
        """Prepare and run the scenario's EDP jobs in batches."""
        batching = self.testcase.get('edp_batching',
                                     len(self.testcase['edp_jobs_flow']))
        batching_size = batching
        jobs = self.testcase.get('edp_jobs_flow', [])
        pre_exec = []
        for job in jobs:
            pre_exec.append(self._prepare_job_running(job))
            batching -= 1
            if not batching:
                # Flush a full batch, then start counting again.
                self._job_batching(pre_exec)
                pre_exec = []
                batching = batching_size
        self.check_verification(self.cluster_id)

    def _job_batching(self, pre_exec):
        # Launch every prepared job in the batch, then wait for all of them.
        job_exec_ids = []
        for job_exec in pre_exec:
            job_exec_ids.append(self._run_job(*job_exec))
        self._poll_jobs_status(job_exec_ids)

    def _create_datasources(self, job):
        """Create the input/output datasources a job declares.

        :returns: (input_id, output_id); either may be None.
        """
        def create(ds, name):
            credential_vars = {}
            source = ds.get('source', None)
            # With no source to upload, generate a random destination name.
            destination = None if source else utils.rand_name(
                ds['destination'])
            if ds['type'] == 'swift':
                url = self._create_swift_data(source, destination)
                credential_vars = {
                    'credential_user': self.credentials['os_username'],
                    'credential_pass': self.credentials['os_password']
                }
            elif ds['type'] == 's3':
                url = self._create_s3_data(source, destination)
                credential_vars = {
                    's3_credentials': {
                        'accesskey': self.credentials['s3_accesskey'],
                        'secretkey': self.credentials['s3_secretkey'],
                        'endpoint': utils_url.url_schema_remover(
                            self.credentials['s3_endpoint']),
                        'ssl': self.credentials['s3_endpoint_ssl'],
                        'bucket_in_path': self.credentials['s3_bucket_path']
                    }
                }
            elif ds['type'] == 'hdfs':
                url = self._create_dfs_data(source, destination,
                                            self.testcase.get(
                                                'hdfs_username', 'hadoop'),
                                            ds['type'])
            elif ds['type'] == 'maprfs':
                url = self._create_dfs_data(source, destination,
                                            ds.get('maprfs_username',
                                                   'mapr'),
                                            ds['type'])
            return self.__create_datasource(
                name=utils.rand_name(name),
                description='',
                data_source_type=ds['type'], url=url,
                **credential_vars)

        input_id, output_id = None, None
        if job.get('input_datasource'):
            ds = job['input_datasource']
            input_id = create(ds, 'input')
        if job.get('output_datasource'):
            ds = job['output_datasource']
            output_id = create(ds, 'output')
        return input_id, output_id

    def _create_job_binaries(self, job):
        # Upload the main library plus any additional libraries of a job.
        main_libs = []
        additional_libs = []
        if job.get('main_lib'):
            main_libs.append(self._create_job_binary(job['main_lib']))
        for add_lib in job.get('additional_libs', []):
            lib_id = self._create_job_binary(add_lib)
            additional_libs.append(lib_id)
        return main_libs, additional_libs

    def _create_job_binary(self, job_binary):
        """Upload one job binary to swift/s3/internal-db and register it."""
        url = None
        extra = {}
        if job_binary['type'] == 'swift':
            url = self._create_swift_data(job_binary['source'])
            extra['user'] = self.credentials['os_username']
            extra['password'] = self.credentials['os_password']
        elif job_binary['type'] == 's3':
            url = self._create_s3_data(job_binary['source'])
            extra['accesskey'] = self.credentials['s3_accesskey']
            extra['secretkey'] = self.credentials['s3_secretkey']
            extra['endpoint'] = self.credentials['s3_endpoint']
        elif job_binary['type'] == 'database':
            url = self._create_internal_db_data(job_binary['source'])
        job_binary_name = '%s-%s' % (
            utils.rand_name('test'),
            os.path.basename(job_binary['source']))
        return self.__create_job_binary(job_binary_name, url, '', extra)

    # NOTE(review): 'type' shadows the builtin; kept for interface
    # compatibility with existing callers.
    def _create_job(self, type, mains, libs):
        return self.__create_job(utils.rand_name('test'), type, mains,
                                 libs, '')

    def _parse_job_configs(self, job):
        # Normalize configs/args from the scenario file to plain strings.
        configs = {}
        if job.get('configs'):
            configs['configs'] = {}
            for param, value in six.iteritems(job['configs']):
                configs['configs'][param] = str(value)
        if job.get('args'):
            configs['args'] = list(map(str, job['args']))
        return configs

    def _run_job(self, job_id, input_id, output_id, configs):
        return self.__run_job(job_id, self.cluster_id, input_id,
                              output_id, configs)

    def _poll_jobs_status(self, exec_ids):
        """Wait for all executions to reach a terminal state, then report.

        The finally-block re-reads every status so failures are reported
        even when the surrounding Timeout fires.
        """
        try:
            with fixtures.Timeout(
                    timeouts.Defaults.instance.timeout_poll_jobs_status,
                    gentle=True):
                success = False
                polling_ids = list(exec_ids)
                while not success:
                    current_ids = list(polling_ids)
                    success = True
                    for exec_id in polling_ids:
                        status = self.sahara.get_job_status(exec_id)
                        if status not in ['FAILED', 'KILLED',
                                          'DONEWITHERROR', "SUCCEEDED"]:
                            success = False
                        else:
                            # Terminal: stop polling this execution.
                            current_ids.remove(exec_id)
                    polling_ids = list(current_ids)
                    time.sleep(5)
        finally:
            report = []
            for exec_id in exec_ids:
                status = self.sahara.get_job_status(exec_id)
                if status != "SUCCEEDED":
                    info = self.sahara.get_job_info(exec_id)
                    report.append("Job with id={id}, name={name}, "
                                  "type={type} has status "
                                  "{status}".format(id=exec_id,
                                                    name=info.name,
                                                    type=info.type,
                                                    status=status))
            if report:
                self.fail("\n".join(report))

    def _get_file_with_defaults(self, file_path):
        """ Check if the file exists; if it is a relative path, check also
            among the default files.
        """
        if not file_path:
            return ''
        all_files = [file_path]
        if not os.path.isabs(file_path):
            # relative path: look into default templates too, if defined
            default_file = os.path.join(self.default_templ_dir, file_path)
            if os.path.abspath(default_file) != os.path.abspath(file_path):
                all_files.append(default_file)
        for checked_file in all_files:
            if os.path.isfile(checked_file):
                return checked_file
        raise Exception('File %s not found while looking into %s' %
                        (file_path, all_files))

    def _read_source_file(self, source):
        # Binary read so the content can go to swift/s3/hdfs untouched.
        if not source:
            return None
        with open(self._get_file_with_defaults(source), 'rb') as source_fd:
            data = source_fd.read()
        return data

    def _create_swift_data(self, source=None, destination=None):
        """Upload (optional) source data to swift; return its swift:// URL."""
        container = self._get_swift_container()
        path = utils.rand_name(destination if destination else 'test')
        data = self._read_source_file(source)
        self.__upload_to_container(container, path, data)
        return 'swift://%s.sahara/%s' % (container, path)

    def _create_s3_data(self, source=None, destination=None):
        """Upload (optional) source data to s3; return its s3:// URL."""
        bucket = self._get_s3_bucket()
        path = utils.rand_name(destination if destination else 'test')
        data = self._read_source_file(source)
        self.__upload_to_bucket(bucket, path, data)
        return 's3://%s/%s' % (bucket, path)

    def _create_dfs_data(self, source, destination, hdfs_username, fs):
        """Push source data into HDFS/MapR-FS over SSH; return its path."""
        def to_hex_present(string):
            # Escape bytes as \xNN so 'echo -e' reproduces them verbatim.
            return "".join(map(lambda x: hex(ord(x)).replace("0x", "\\x"),
                               string.decode('utf-8')))
        if destination:
            return destination

        command_prefixes = {'hdfs': 'hdfs dfs',
                            'maprfs': 'hadoop fs'}
        hdfs_dir = utils.rand_name("/user/%s/data" % hdfs_username)
        # Prefer a namenode; fall back to MapR's CLDB node.
        instances = self._get_nodes_with_process('namenode')
        if len(instances) == 0:
            instances = self._get_nodes_with_process('CLDB')
        inst_ip = instances[0]["management_ip"]
        self._run_command_on_node(
            inst_ip,
            "sudo su - -c \"%(prefix)s -mkdir -p %(path)s \" %(user)s" % {
                "prefix": command_prefixes[fs],
                "path": hdfs_dir,
                "user": hdfs_username})
        hdfs_filepath = utils.rand_name(hdfs_dir + "/file")
        data = self._read_source_file(source)
        if not data:
            data = ''
        self._run_command_on_node(
            inst_ip,
            ("echo -e \"%(data)s\" | sudo su - -c \"%(prefix)s"
             " -put - %(path)s\" %(user)s") % {
                "data": to_hex_present(data),
                "prefix": command_prefixes[fs],
                "path": hdfs_filepath,
                "user": hdfs_username})
        return hdfs_filepath

    def _create_internal_db_data(self, source):
        # Store the data as a sahara-internal job binary.
        data = self._read_source_file(source)
        id = self.__create_internal_db_data(utils.rand_name('test'), data)
        return 'internal-db://%s' % id

    def _get_swift_container(self):
        # NOTE(review): getattr looks up the literal name
        # '__swift_container' while the assignment below stores the
        # name-mangled '_BaseTestCase__swift_container', so the cached
        # value is never found and a new container appears to be created
        # on every call -- confirm whether this caching ever worked.
        if not getattr(self, '__swift_container', None):
            self.__swift_container = self.__create_container(
                utils.rand_name('sahara-tests'))
        return self.__swift_container

    def _get_s3_bucket(self):
        # NOTE(review): same name-mangling caveat as _get_swift_container.
        if not getattr(self, '__s3_bucket', None):
            self.__s3_bucket = self.__create_bucket(
                utils.rand_name('sahara-tests'))
        return self.__s3_bucket

    @track_result("Cluster scaling", False)
    def check_scale(self):
        """Apply the scenario's scaling operations and validate the result."""
        scale_ops = []
        ng_before_scale = self.sahara.get_cluster(
            self.cluster_id).node_groups
        scale_ops = self.testcase['scaling']
        body = {}
        for op in scale_ops:
            node_scale = op['node_group']
            if op['operation'] == 'add':
                if 'add_node_groups' not in body:
                    body['add_node_groups'] = []
                body['add_node_groups'].append({
                    'node_group_template_id':
                        self.ng_id_map.get(
                            node_scale,
                            self.sahara.get_node_group_template_id(
                                node_scale)),
                    'count': op['size'],
                    'name': utils.rand_name(node_scale)
                })
            if op['operation'] == 'resize':
                if 'resize_node_groups' not in body:
                    body['resize_node_groups'] = []
                body['resize_node_groups'].append({
                    'name': self.ng_name_map.get(
                        node_scale,
                        self.sahara.get_node_group_template_id(node_scale)),
                    'count': op['size']
                })

        if body:
            self.sahara.scale_cluster(self.cluster_id, body)
            self._poll_cluster_status(self.cluster_id)
            ng_after_scale = self.sahara.get_cluster(
                self.cluster_id).node_groups
            self._validate_scaling(ng_after_scale,
                                   self._get_expected_count_of_nodes(
                                       ng_before_scale, body))

    def _validate_scaling(self, after, expected_count):
        # Every expected node group must exist afterwards with the
        # expected instance count (missing group counts as 0).
        for (key, value) in six.iteritems(expected_count):
            ng = {}
            for after_ng in after:
                if after_ng['name'] == key:
                    ng = after_ng
                    break
            self.assertEqual(value, ng.get('count', 0))

    def _get_expected_count_of_nodes(self, before, body):
        # Start from pre-scale counts, then overlay additions and resizes.
        expected_mapper = {}
        for ng in before:
            expected_mapper[ng['name']] = ng['count']
        for ng in body.get('add_node_groups', []):
            expected_mapper[ng['name']] = ng['count']
        for ng in body.get('resize_node_groups', []):
            expected_mapper[ng['name']] = ng['count']
        return expected_mapper

    @track_result("Check cinder volumes")
    def check_cinder(self):
        """Verify every attached cinder volume is actually mounted."""
        if not self._get_node_list_with_volumes() or not self.cinder:
            print("All tests for Cinder were skipped")
            return
        for node_with_volumes in self._get_node_list_with_volumes():
            volume_count_on_node = int(self._run_command_on_node(
                node_with_volumes['node_ip'],
                'mount | grep %s | wc -l' %
                node_with_volumes['volume_mount_prefix']
            ))
            self.assertEqual(
                node_with_volumes['volume_count'], volume_count_on_node,
                'Some volumes were not mounted to node.\n'
                'Expected count of mounted volumes to node is %s.\n'
                'Actual count of mounted volumes to node is %s.'
                % (node_with_volumes['volume_count'],
                   volume_count_on_node)
            )

    def _get_node_list_with_volumes(self):
        # Collect (ip, volume count, mount prefix) for instances of node
        # groups that declare cinder volumes.
        node_groups = self.sahara.get_cluster(self.cluster_id).node_groups
        node_list_with_volumes = []
        for node_group in node_groups:
            if node_group['volumes_per_node'] != 0:
                for instance in node_group['instances']:
                    node_list_with_volumes.append({
                        'node_ip': instance['management_ip'],
                        'volume_count': node_group['volumes_per_node'],
                        'volume_mount_prefix':
                            node_group['volume_mount_prefix']
                    })
        return node_list_with_volumes

    @track_result("Create node group templates")
    def _create_node_group_templates(self):
        """Create one node group template per scenario entry.

        :returns: dict mapping scenario node-group name -> created template id.
        """
        ng_id_map = {}
        floating_ip_pool = None
        security_group = None
        proxy_exist = False

        if self.network['public_network']:
            floating_ip_pool = self.neutron.get_network_id(
                self.network['public_network'])

        node_groups = []
        for ng in self.testcase['node_group_templates']:
            node_groups.append(ng)
            if ng.get('is_proxy_gateway', False):
                proxy_exist = True

        for ng in node_groups:
            kwargs = dict(ng)
            kwargs.update(self.plugin_opts)
            kwargs['flavor_id'] = self._get_flavor_id(kwargs['flavor'])
            del kwargs['flavor']
            kwargs['name'] = utils.rand_name(kwargs['name'])
            # Only the proxy gateway gets a floating IP when one exists;
            # otherwise every node group gets one.
            if (not proxy_exist) or (proxy_exist and kwargs.get(
                    'is_proxy_gateway', False)):
                kwargs['floating_ip_pool'] = floating_ip_pool
            if not kwargs.get('auto_security_group', True):
                # Lazily create one shared security group for all groups
                # that opted out of auto security groups.
                if security_group is None:
                    sg_name = utils.rand_name('scenario')
                    security_group = self.__create_security_group(sg_name)
                    self.neutron.add_security_group_rule_for_neutron(
                        security_group)
                kwargs['security_groups'] = [security_group]
            # boot_from_volume requires APIv2
            if kwargs.get('boot_from_volume', False) and not self.use_api_v2:
                raise Exception('boot_from_volume is set for %s but it '
                                'requires APIv2' % (kwargs['name']))

            ng_id = self.__create_node_group_template(**kwargs)
            ng_id_map[ng['name']] = ng_id
        return ng_id_map

    @track_result("Set flavor")
    def _get_flavor_id(self, flavor):
        """Resolve a flavor name or spec dict to a flavor id (creating it)."""
        if isinstance(flavor, six.string_types):
            return self.nova.get_flavor_id(flavor)
        else:
            # if the name already exists, use it
            if flavor.get('name'):
                try:
                    return self.nova.get_flavor_id(flavor['name'])
                except exc.NotFound:
                    print("Custom flavor %s not found, it will be created"
                          % (flavor['name']))
            flavor_id = self.nova.create_flavor(flavor).id
            self.addCleanup(self.nova.delete_flavor, flavor_id)
            return flavor_id

    @track_result("Create cluster template")
    def _create_cluster_template(self):
        """Build the cluster template from the per-group templates."""
        self.ng_name_map = {}
        template = self.testcase['cluster_template']
        kwargs = dict(template)
        ngs = kwargs['node_group_templates']
        del kwargs['node_group_templates']
        kwargs['node_groups'] = []
        for ng, count in ngs.items():
            ng_name = utils.rand_name(ng)
            # Remember the randomized name for later resize operations.
            self.ng_name_map[ng] = ng_name
            kwargs['node_groups'].append({
                'name': ng_name,
                'node_group_template_id': self.ng_id_map[ng],
                'count': count})

        kwargs.update(self.plugin_opts)
        kwargs['name'] = utils.rand_name(kwargs.get('name', 'ct'))
        kwargs['net_id'] = self.neutron.get_network_id(
            self.network['private_network'])

        return self.__create_cluster_template(**kwargs)

    @track_result("Check event logs")
    def _check_event_logs(self, cluster):
        """Fail if any provisioning step is marked unsuccessful."""
        invalid_steps = []
        if cluster.is_transient:
            # skip event log testing
            return

        for step in cluster.provision_progress:
            if not step['successful']:
                invalid_steps.append(step)

        if len(invalid_steps) > 0:
            invalid_steps_info = "\n".join(six.text_type(e)
                                           for e in invalid_steps)
            steps_info = "\n".join(six.text_type(e)
                                   for e in cluster.provision_progress)
            raise exc.TempestException(
                "Issues with event log work: "
                "\n Incomplete steps: \n\n {invalid_steps}"
                "\n All steps: \n\n {steps}".format(
                    steps=steps_info,
                    invalid_steps=invalid_steps_info))

    @track_result("Create cluster")
    def _create_cluster(self, cluster_template_id):
        """Launch the cluster from the template; return the cluster id."""
        if self.testcase.get('cluster'):
            kwargs = dict(self.testcase['cluster'])
        else:
            kwargs = {}  # default template

        kwargs.update(self.plugin_opts)
        kwargs['name'] = utils.rand_name(kwargs.get('name', 'test'))
        kwargs['cluster_template_id'] = cluster_template_id
        kwargs['default_image_id'] = self.glance.get_image_id(
            self.testcase['image'])
        kwargs['user_keypair_id'] = self.key_name

        return self.__create_cluster(**kwargs)

    @track_result("Check cluster state")
    def _poll_cluster_status_tracked(self, cluster_id):
        # Thin tracked wrapper so the wait shows up in the result table.
        self._poll_cluster_status(cluster_id)

    def _poll_cluster_status(self, cluster_id):
        """Wait until the cluster is Active; raise on Error state."""
        with fixtures.Timeout(
                timeouts.Defaults.instance.timeout_poll_cluster_status,
                gentle=True):
            while True:
                status = self.sahara.get_cluster_status(cluster_id)
                if status == CLUSTER_STATUS_ACTIVE:
                    break
                if status == CLUSTER_STATUS_ERROR:
                    cluster = self.sahara.get_cluster(cluster_id)
                    failure_desc = cluster.status_description
                    message = ("Cluster in %s state with"
                               " a message below:\n%s") % (status,
                                                           failure_desc)
                    raise exc.TempestException(message)
                time.sleep(3)

    def _run_command_on_node(self, node_ip, command):
        """Run a shell command on a node, via the proxy gateway if present."""
        host_ip = node_ip
        if self.proxy:
            host_ip = self.proxy
            # NOTE(review): the "(unknown)" placeholders below look like a
            # mangled {filename} template -- the filename= kwarg passed to
            # format() is otherwise unused; confirm against the original
            # source before relying on the proxy path.
            command = ("echo '{pkey}' > (unknown) && chmod 600 (unknown) && "
                       "ssh -o StrictHostKeyChecking=no {ip} -i (unknown) "
                       "'{cmd}' && rm (unknown)".format(
                           pkey=self.private_key,
                           filename='scenario.pem',
                           ip=node_ip,
                           cmd=command))
        ssh_session = connection.Client(host_ip,
                                        self.testcase['ssh_username'],
                                        pkey=self.private_key)
        return ssh_session.exec_command(command)

    def _get_nodes_with_process(self, process=None):
        """Return all instances whose node group runs the given process."""
        if process is not None:
            process = process.lower()
        nodegroups = self.sahara.get_cluster(self.cluster_id).node_groups
        nodes_with_process = []
        for nodegroup in nodegroups:
            for node_process in nodegroup['node_processes']:
                if not process or process in node_process.lower():
                    nodes_with_process.extend(nodegroup['instances'])
        return nodes_with_process

    def _get_health_status(self, cluster):
        # 'UNKNOWN' means verification info is absent or malformed.
        try:
            return cluster.verification['status']
        except (AttributeError, KeyError):
            return 'UNKNOWN'

    def _poll_verification_status(self, cluster_id):
        """Wait for a health verification run to reach a terminal state."""
        with fixtures.Timeout(
                timeouts.Defaults.instance.timeout_poll_cluster_status,
                gentle=True):
            while True:
                cluster = self.sahara.get_cluster(cluster_id)
                status = self._get_health_status(cluster)
                if status == 'UNKNOWN':
                    print("Cluster verification did not start")
                    break
                if status in HEALTH_CHECKS:
                    break
                time.sleep(3)

    @track_result("Check cluster verification")
    def check_verification(self, cluster_id):
        """Trigger a cluster health verification and wait for it."""
        if self.check_feature_available("verification"):
            self._poll_cluster_status(cluster_id)
            # need to check if previous verification check is not
            # in the status CHECKING
            self._poll_verification_status(cluster_id)
            self.sahara.start_cluster_verification(cluster_id)
            # check if this verification check finished without errors
            self._poll_verification_status(cluster_id)
        else:
            print("All tests for cluster verification were skipped")

    # client ops
    # Each __create_* helper creates a resource and registers its deletion
    # as a cleanup unless the scenario asks to retain resources.

    def __create_node_group_template(self, *args, **kwargs):
        id = self.sahara.create_node_group_template(*args, **kwargs)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.sahara.delete_node_group_template, id)
        return id

    def __create_security_group(self, sg_name):
        id = self.neutron.create_security_group_for_neutron(sg_name)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.neutron.delete_security_group_for_neutron,
                            id)
        return id

    def __create_cluster_template(self, *args, **kwargs):
        id = self.sahara.create_cluster_template(*args, **kwargs)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.sahara.delete_cluster_template, id)
        return id

    def __create_cluster(self, *args, **kwargs):
        id = self.sahara.create_cluster(*args, **kwargs)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.sahara.delete_cluster, id)
        return id

    def __create_datasource(self, *args, **kwargs):
        id = self.sahara.create_datasource(*args, **kwargs)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.sahara.delete_datasource, id)
        return id

    def __create_internal_db_data(self, *args, **kwargs):
        id = self.sahara.create_job_binary_internal(*args, **kwargs)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.sahara.delete_job_binary_internal, id)
        return id

    def __create_job_binary(self, *args, **kwargs):
        id = self.sahara.create_job_binary(*args, **kwargs)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.sahara.delete_job_binary, id)
        return id

    def __create_job(self, *args, **kwargs):
        id = self.sahara.create_job_template(*args, **kwargs)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.sahara.delete_job_template, id)
        return id

    def __run_job(self, *args, **kwargs):
        id = self.sahara.run_job(*args, **kwargs)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.sahara.delete_job_execution, id)
        return id

    def __create_container(self, container_name):
        self.swift.create_container(container_name)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.swift.delete_container, container_name)
        return container_name

    def __upload_to_container(self, container_name, object_name, data=None):
        if data:
            self.swift.upload_data(container_name, object_name, data)
            if not self.testcase['retain_resources']:
                self.addCleanup(self.swift.delete_object, container_name,
                                object_name)

    def __create_bucket(self, bucket_name):
        self.boto.create_bucket(bucket_name)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.boto.delete_bucket, bucket_name)
        return bucket_name

    def __upload_to_bucket(self, bucket_name, object_name, data=None):
        if data:
            self.boto.upload_data(bucket_name, object_name, data)
            if not self.testcase['retain_resources']:
                self.addCleanup(self.boto.delete_object, bucket_name,
                                object_name)

    def __create_keypair(self):
        # Upload the generated public key to nova under a random name.
        key = utils.rand_name('scenario_key')
        self.nova.nova_client.keypairs.create(key,
                                              public_key=self.public_key)
        if not self.testcase['retain_resources']:
            self.addCleanup(self.nova.delete_keypair, key)
        return key

    def check_feature_available(self, feature_name):
        # A feature is "available" when the cluster object carries a
        # truthy attribute of that name.
        if not getattr(self.sahara.get_cluster(self.cluster_id),
                       feature_name, None):
            return False
        return True

    def tearDown(self):
        """Print the result table, optionally write a report file, and fail
        the test if any tracked check failed."""
        tbs = []
        table = prettytable.PrettyTable(["Check", "Status",
                                         "Duration, s", "Start time"])
        table.align["Check"] = "l"
        for check in self._results:
            table.add_row(
                [check['check_name'], check['status'], check['duration'],
                 check['start_time']])
            if check['status'] == CHECK_FAILED_STATUS:
                tbs.append(check['exception_time'])
                tbs.extend(check['traceback'])
                tbs.append("")
        print("Results of testing plugin", self.plugin_opts['plugin_name'],
              self.plugin_opts[self.plugin_version_option])
        print(table)
        # Tracebacks go to stderr so stdout keeps only the summary table.
        print("\n".join(tbs), file=sys.stderr)
        super(BaseTestCase, self).tearDown()

        test_failed = any([c['status'] == CHECK_FAILED_STATUS
                           for c in self._results])

        if self.report:
            filename = {"time": time.strftime('%Y%m%d%H%M%S',
                                              time.localtime())}
            filename.update(self.plugin_opts)
            # let's normalize this variable so that we can use
            # a stable name as formatter later.
            if 'hadoop_version' in filename:
                filename['plugin_version'] = filename['hadoop_version']
                del filename['hadoop_version']
            report_file_name = os.path.join(
                self.results_dir,
                '{plugin_name}_{plugin_version}-{time}'.format(**filename))
            # NOTE(review): the next line computes a timestamp and discards
            # it -- appears to be dead code; confirm before removing.
            time.strftime('%Y%m%d%H%M%S', time.localtime())
            with open(report_file_name, 'w+') as report_file:
                report_file.write(str(self._results))
            print("Results can be found in %s" % report_file_name)

        if test_failed:
            self.fail("Scenario tests failed")
import hashlib

from datetime import datetime

from werkzeug import generate_password_hash, check_password_hash, \
    cached_property

from flaskext.sqlalchemy import BaseQuery
from flaskext.principal import RoleNeed, UserNeed, Permission

from newsmeme.extensions import db
from newsmeme.permissions import null
from newsmeme.models.permissions import Permissions
from newsmeme.models.types import DenormalizedText


class UserQuery(BaseQuery):
    # Custom query class for User (wired in via User.query_class below).

    def from_identity(self, identity):
        """
        Loads user from flaskext.principal.Identity instance and
        assigns permissions from user.

        A "user" instance is monkeypatched to the identity instance.

        If no user found then None is returned.
        """
        try:
            user = self.get(int(identity.name))
        except ValueError:
            # identity.name was not a numeric id (e.g. anonymous identity).
            user = None

        if user:
            identity.provides.update(user.provides)

        identity.user = user

        return user

    def authenticate(self, login, password):
        # The login token may be either the username or the email address.
        user = self.filter(db.or_(User.username==login,
                                  User.email==login)).first()

        if user:
            authenticated = user.check_password(password)
        else:
            authenticated = False

        # Returns (user-or-None, bool) so callers can distinguish an
        # unknown user from a bad password.
        return user, authenticated

    def authenticate_openid(self, email, openid):
        user = self.filter(User.email==email).first()

        if user:
            authenticated = user.check_openid(openid)
        else:
            authenticated = False

        return user, authenticated


class User(db.Model):
    # Site member: credentials, role, and denormalised social graph
    # (follower/following id sets stored as DenormalizedText columns).

    __tablename__ = "users"

    query_class = UserQuery

    # user roles (higher value implies more privilege; see is_moderator
    # and is_admin which compare with >=)
    MEMBER = 100
    MODERATOR = 200
    ADMIN = 300

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.Unicode(60), unique=True, nullable=False)
    email = db.Column(db.String(150), unique=True, nullable=False)
    karma = db.Column(db.Integer, default=0)
    date_joined = db.Column(db.DateTime, default=datetime.utcnow)
    activation_key = db.Column(db.String(80), unique=True)
    role = db.Column(db.Integer, default=MEMBER)
    receive_email = db.Column(db.Boolean, default=False)
    email_alerts = db.Column(db.Boolean, default=False)
    followers = db.Column(DenormalizedText)
    following = db.Column(DenormalizedText)

    # Raw columns backing the hashed password/openid synonyms below.
    _password = db.Column("password", db.String(80))
    _openid = db.Column("openid", db.String(80), unique=True)

    class Permissions(Permissions):
        # Per-user permission set, exposed via the User.permissions
        # cached property. NOTE(review): attribute reads such as
        # self.receive_email / self.friends presumably delegate to the
        # wrapped User - confirm against newsmeme.models.permissions.

        @cached_property
        def send_message(self):
            # Messaging requires opt-in and at least one mutual friend.
            if not self.receive_email:
                return null

            needs = [UserNeed(user_id) for user_id in self.friends]

            if not needs:
                return null

            return Permission(*needs)

    def __init__(self, *args, **kwargs):
        super(User, self).__init__(*args, **kwargs)
        # New rows come back with None in the DenormalizedText columns;
        # normalise to empty sets so set operations below are safe.
        self.followers = self.followers or set()
        self.following = self.following or set()

    def __str__(self):
        return self.username

    def __repr__(self):
        return "<%s>" % self

    @cached_property
    def permissions(self):
        return self.Permissions(self)

    def _get_password(self):
        return self._password

    def _set_password(self, password):
        # Only the salted hash is ever stored.
        self._password = generate_password_hash(password)

    password = db.synonym("_password",
                          descriptor=property(_get_password,
                                              _set_password))

    def check_password(self, password):
        if self.password is None:
            return False
        return check_password_hash(self.password, password)

    def _get_openid(self):
        return self._openid

    def _set_openid(self, openid):
        # The OpenID identity URL is hashed like a password, so
        # verification goes through check_password_hash as well.
        self._openid = generate_password_hash(openid)

    openid = db.synonym("_openid",
                        descriptor=property(_get_openid,
                                            _set_openid))

    def check_openid(self, openid):
        if self.openid is None:
            return False
        return check_password_hash(self.openid, openid)

    @cached_property
    def provides(self):
        # Principal needs granted to this user's identity.
        needs = [RoleNeed('authenticated'),
                 UserNeed(self.id)]

        if self.is_moderator:
            needs.append(RoleNeed('moderator'))

        if self.is_admin:
            needs.append(RoleNeed('admin'))

        return needs

    @cached_property
    def num_followers(self):
        if self.followers:
            return len(self.followers)
        return 0

    @cached_property
    def num_following(self):
        return len(self.following)

    def is_following(self, user):
        return user.id in self.following

    @property
    def friends(self):
        # Friends are mutual follows.
        return self.following.intersection(self.followers)

    def is_friend(self, user):
        return user.id in self.friends

    def get_friends(self):
        return User.query.filter(User.id.in_(self.friends))

    def follow(self, user):
        # Updates both sides of the relationship in memory.
        user.followers.add(self.id)
        self.following.add(user.id)

    def unfollow(self, user):
        if self.id in user.followers:
            user.followers.remove(self.id)

        if user.id in self.following:
            self.following.remove(user.id)

    def get_following(self):
        """
        Return following users as query
        """
        return User.query.filter(User.id.in_(self.following or set()))

    def get_followers(self):
        """
        Return followers as query
        """
        return User.query.filter(User.id.in_(self.followers or set()))

    @property
    def is_moderator(self):
        return self.role >= self.MODERATOR

    @property
    def is_admin(self):
        return self.role >= self.ADMIN

    @cached_property
    def gravatar(self):
        # MD5 of the normalised email, per the Gravatar protocol.
        if not self.email:
            return ''
        md5 = hashlib.md5()
        # NOTE(review): update() with a str only works on Python 2; on
        # Python 3 the email would need to be encoded to bytes first.
        md5.update(self.email.strip().lower())
        return md5.hexdigest()

    def gravatar_url(self, size=80):
        if not self.gravatar:
            return ''
        return "http://www.gravatar.com/avatar/%s.jpg?s=%d" % (
            self.gravatar, size)
"""
This module has been written to convert transcribed commentaries from text
files to TEI compatible XML.

Funding is provided by an ERC funded project studying Arabic commentaries on
the Hippocratic Aphorisms. The Principal Investigator is Peter E. Pormann,
The University of Manchester.

It is anticipated the module will be used via the main.py module which
attempts to process any input file or directory containing files with a .txt
extension.

Each text file base name should end in an underscore followed by a numerical
value, e.g. file_1.txt, file_2.txt, etc. The numerical value is subsequently
used when creating the title section ``<div>`` element, e.g.
``<div n="1" type="Title_section">`` for file_1.txt.

.. note::
    This is optional, by default the version is set at 1.

If processing succeeds two XML files will be created in a folder called XML.
The XML file names start with the text file base name and end in _main.xml
(for the main XML) and _app.xml (for the apparatus XML); for example, for
file_1.txt the XML files will be file_1_main.xml and file_1_app.xml.

If processing fails error messages will be saved in the exegis.log file.

The commentaries should be utf-8 text files with the format as documented
in the associated documentation (docs/_build/index.html).
:Authors: Jonathan Boyle, Nicolas Gruel <nicolas.gruel@manchester.ac.uk> :Copyright: IT Services, The University of Manchester """ # pylint: disable=locally-disabled, invalid-name import os import re from lxml import etree try: from .analysis import references, footnotes, AnalysisException from .introduction import Introduction from .title import Title, TitleException from .footnotes import Footnotes, FootnotesException from .baseclass import Exegis, logger, TEMPLATE_FNAME, RELAXNG_FNAME except ImportError: from analysis import references, footnotes, AnalysisException from introduction import Introduction, IntroductionException from title import Title, TitleException from footnotes import Footnotes, FootnotesException from baseclass import Exegis, logger, TEMPLATE_FNAME, RELAXNG_FNAME # Define an Exception class AphorismsToXMLException(Exception): """Class for exception """ pass class Process(Exegis): """Class to main hypocratic aphorism text to produce a TEI XML file. Attributes ---------- fname : str Name of the file to convert. The text file base name is expected to end with an underscore followed by a numerical value, e.g. file_1.txt, file_2.txt, etc. This numerical value is used when creating the title section <div> element, e.g. <div n="1" type="Title_section"> for file_1.txt. folder : str, optional Name of the folder where are the files to convert doc_num : int, optional version of the document treated. Default value: 1 """ def __init__(self, fname=None, folder=None, doc_num=1): Exegis.__init__(self) self.folder = folder self.fname = fname self.doc_num = doc_num self.template_fname = TEMPLATE_FNAME self.relaxng_fname = None # Create basename file. 
if self.fname is not None: self.set_basename() else: self.base_name = None self.footnotes_app = None # Initialise footnote number self._next_footnote = 1 # other attributes used self._introduction = '' self._title = '' self._aph_com = {} # aphorism and commentaries self._text = '' self.footnotes = '' self._n_footnote = 1 self.template = '' # Initialisation of the xml_main and xml_app list # They are created here and not in the __init__ to have # the reinitialisation where it is needed. def set_basename(self): """Method to set the basename attribute if fname is not None """ self.base_name = os.path.splitext(os.path.basename(self.fname))[0] # Create folder for XML if not os.path.exists('XML'): os.mkdir('XML') # Set XML file name self.xml_file = os.path.join('XML', self.base_name + '.xml') def open_document(self, fname=None): """Method to open and read the exegis document. Parameters ---------- fname : str, optional name of the file to analyse. Attributes ---------- folder : str, optional Name of the folder where are the files to convert fname : str Name of the file to convert. The text file base name is expected to end with an underscore followed by a numerical value, e.g. file_1.txt, file_2.txt, etc. This numerical value is used when creating the title section <div> element, e.g. <div n="1" type="Title_section"> for file_1.txt. text : str string which contains the whole file in utf-8 format. Raises ------ AphorismsToXMLException if document can not be: - open - there subfolder present in the folder - file not treatable by the software (e.g. .DS_Store) - file does not exist """ if fname is not None: self.folder, self.fname = os.path.split(fname) self.set_basename() if self.base_name is None and self.fname is not None: self.set_basename() if self.folder is None: self.folder = '.' 
if self.base_name is None: logger.error("There are no file to convert.") raise AphorismsToXMLException full_path = os.path.join(self.folder, self.fname) if os.path.isdir(full_path): logger.info('The software does not treat subfolder.') raise AphorismsToXMLException # Extract the document number, it is expected this is at the end of the # base name following an '_' try: sep, doc_num = self.base_name.rpartition('_')[1:] self.doc_num = int(doc_num) if sep == '': raise AphorismsToXMLException except ValueError: info = ('File name {} does not provide version information. ' 'Use version 1 by default'.format(self.fname)) logger.info(info) # Open the file to process # pylint: disable=locally-disabled, invalid-name try: with open(full_path, 'r', encoding="utf-8") as f: # Read in file self._text = f.read().strip() except UnicodeDecodeError: info = ('File {} is not treatable by the software'.format( self.fname)) logger.info(info) raise AphorismsToXMLException except FileNotFoundError: info = ('File {} does not exist'.format(self.fname)) logger.info(info) raise AphorismsToXMLException def divide_document(self): """Method to divide the document in the three main parts. An exegis document si composed in three or four main parts: - The introduction (optional) - The title - The aphorisms - The footnotes This method will divide the document in the three or four parts. Attributes ---------- _introduction : str A string which contains the introduction of the document if present _title : str A string which contains the title of the document _text : str A string which contains the aphorisms and commentaries of the document _footnotes : str A string which contains the footnotes of the document Raises ------ AphorismsToXMLException if it is not possible to divide the document. 
""" # Not sure that is the best way to do but this is just a trial # cut the portion of the test, starting from the end, until the # characters footnotes_sep footnotes_sep = '*1*' loc_footnotes = self._text.rfind(footnotes_sep) if loc_footnotes == self._text.find(footnotes_sep): logger.error('Footnote referenced in the text but ' 'no footnote section present.') self.footnotes = '' raise AphorismsToXMLException if loc_footnotes != -1: self.footnotes = self._text[loc_footnotes:].strip() self._text = self._text[:loc_footnotes] else: logger.info('There are no footnotes present.') # Cut the intro (if present) try: p = re.compile(r'\+\+\n') _tmp = p.split(self._text) if len(_tmp) == 3: self._title = _tmp[0].strip() self._introduction = _tmp[1].strip() self._text = _tmp[2].strip() elif len(_tmp) == 2: self._introduction = _tmp[0].strip() self._text = _tmp[1].strip() except ValueError as e: raise AphorismsToXMLException(e) try: p = re.compile(r'\n\s{0,}1\.?\n') if self._title == '': _tmp = p.split(self._text) self._title = _tmp[0] self._text = '1.\n' + '1.\n'.join(_tmp[1:]) except ValueError as e: logger.error('Aphorism should have numeration as 1. or 1') raise AphorismsToXMLException(e) return def aphorisms_dict(self): """Create an order dictionary (OrderedDict object) with the aphorisms and commentaries. Attributes ---------- _aph_com : dict dictionary which contains the aphorisms and the commentaries associated. Raises ------ AphorismsToXMLException if it is not possible to create the dictionary. """ aphorism = re.split(r'\n\s{0,}[0-9]+\.?\n', '\n' + self._text)[1:] # Split the text in function of the numbers (i.e. the separation # of the aphorism. # '\s[0-9]+.\n' means 'find string : # which start with end of line or any space character # with at least on number ending # with a point and a end of line. 
p = re.compile(r'\n\s{0,}?[0-9]+\.?\n') error = '' try: n_aphorism = [int(i.group().strip('.\t\n ')) for i in p.finditer('\n' + self._text)] # Find missing aphorism or badly written (e.g.: 14-) missing = [i for i in list(range(1, max(n_aphorism))) if i not in n_aphorism] # Find if multiple aphorism with the same number. doublon = list({i for i in n_aphorism if n_aphorism.count(i) > 1}) if not n_aphorism: error = 'There are no aphorisms detected' logger.error(error) if max(n_aphorism) != len(n_aphorism): error = 'N aphorism expected {}, got: {}'.format( n_aphorism[-1], len(n_aphorism) ) logger.error(error) if missing: error = 'Missing or problematic aphorism: {}'.format(missing) logger.error(error) warning = ('Last aphorism can be problematic but ' 'not detected by the software.') logger.warning(warning) if doublon: error = 'Aphorism with same number: {}'.format(doublon) logger.error(error) if error: raise AphorismsToXMLException(error) except ValueError: error = ('Aphorism numeration format probably does not respect ' 'the convention. ' 'It should be a number following by a point') logger.error(error) raise AphorismsToXMLException except AphorismsToXMLException as e: raise AphorismsToXMLException(e) # create the dictionary with the aphorism (not sure that we need # the ordered one) # use n_aphorism to be sure that there are no error try: self._aph_com = {} for i, aph in enumerate(aphorism): self._aph_com[n_aphorism[i]] = [s.strip() for s in aph.split('\n') if len(s) != 0] except (IndexError, AphorismsToXMLException): error = ('Problem in the creation of the dictionary which' 'which contains the aphorisms') logger.error(error) raise AphorismsToXMLException def read_template(self): """Method to read the XML template used for the transformation Attributes ---------- template : str Contain the text of the XML template provided. Raises ------ AphorismsToXMLException if template cannot be found or read. """ # Open the template file. Kill the process if not there. 
# Template is not optional. try: with open(self.template_fname, 'r', encoding="utf-8") as f: self.template = f.read() info = 'Template file {} found.'.format(self.template_fname) logger.info(info) except FileNotFoundError: error = 'Template file {} not found.'.format(self.template_fname) logger.error(error) raise AphorismsToXMLException if self.relaxng_fname is None: tree = etree.parse(self.template_fname) root = tree.getroot() model = root.xpath("/processing-instruction('xml-model')")[0] self.relaxng_fname = model.text.split('"')[1] logger.info('Relaxng file ' 'use for validation: {} '.format(self.relaxng_fname)) def _create_xml(self): if self.template == '': self.read_template() xml = self.template if self.wits: wits = set(self.wits) wits = list(wits) wits.sort() info = 'Witnesses found in the aphorisms and ' \ 'commentaries {}'.format(wits) logger.info(info) _wits = [] for w in wits: _wits.append(self.xml_oss * self.xml_n_offset + '<witness> {} </witness>'.format(w)) xml = re.sub('#INSERTWITNESSES#', '\n'.join(_wits), xml) if self.xml: xml = re.sub('#INSERTBODY#', '\n'.join(self.xml), xml) if self.app: xml = re.sub('#INSERTAPP#', '\n'.join(self.app), xml) self.xml = xml def _validate_xml(self): try: relaxng_doc = etree.parse(self.relaxng_fname) except OSError: relaxng_doc = etree.parse(RELAXNG_FNAME) self.relaxng_fname = RELAXNG_FNAME relaxng = etree.RelaxNG(relaxng_doc) xml = etree.parse(self.xml_file) # relaxng.validate(xml) # if not relaxng(xml): # logger.error("INVALID") # else: # logger.error(self.xml_file) # logger.error("VALID") try: relaxng.assertValid(xml) logger.info('The document {} created is ' 'valid corresponding ' 'to the Relaxng declared ' 'or used'.format(self.xml_file)) except etree.DocumentInvalid: logger.error('The document {} created is ' 'not valid corresponding ' 'to the Relaxng declared ' 'or used'.format(self.xml_file)) raise AphorismsToXMLException def treat_footnotes(self): """Method to treat Footnote. 
Work even if division of the document didn't work properly but for the footnotes part. """ if not self.footnotes == '': # In most of the file the footnote will be present and can be # treated independently from the aphorism. # Treat the footnote part and create the XML app try: self.footnotes_app = Footnotes(self.footnotes) except FootnotesException: raise AphorismsToXMLException from None logger.info('Footnotes treated') # Create XML app self.footnotes_app.xml_app() self.app = self.footnotes_app.xml self.wits = self.footnotes_app.wits logger.info('Footnotes app file created') def main(self): """ A function to process a text file containing symbols representing references to witnesses and symbols and footnotes defining textual variations, omissions, additions, correxi or conieci. This function uses these symbols to produce files containing EpiDoc compatible XML. If processing succeeds two XML files will be created in folder ./XML with file names that start with the text file base name and ending in _main.xml (for the main XML) and _apps.xml (for the apparatus XML). For example for file_1.txt the XML files will be file_1_main.xml and file_1_app.xml. Modify the attribute ``xml`` to add the title section in the main XML Raises ------ AphorismsToXMLException if the processing of the file does not work as expected. 
""" # Open and read the exegis document self.open_document() debug = 'Open document {}'.format(self.fname) logger.debug(debug) # Divide the document in the different part (intro, title, # text, footnotes) try: self.divide_document() logger.info('Division of the document ok.') except AphorismsToXMLException: logger.error('Division of the document failed.') raise AphorismsToXMLException self.treat_footnotes() self.aphorisms_dict() logger.info('Created aphorisms dictionary') if self._introduction != '': try: intro = Introduction(self._introduction, self._next_footnote) intro.xml_main() self._next_footnote = intro.next_footnote self.xml += intro.xml logger.debug('Introduction treated') except IntroductionException: raise AphorismsToXMLException from None # Deal with the first block of text which should contain # an optional intro # and the title # ======================================================= try: title = Title(self._title, self._next_footnote, self.doc_num) except TitleException: raise AphorismsToXMLException from None logger.debug('Title treated') title.xml_main() logger.debug('Title xml created') self._next_footnote = title.next_footnote # Add title to the xml main self.xml += title.xml # Now process the rest of the main text # ===================================== logger.debug('Start aphorisms and commentaries treatment') for k in self._aph_com: if not len(self._aph_com[k]): error = ('There are no aphorisms in the file. ' 'It can be because of the numeration. ' 'Verify that the it is starting at 1 or 1. 
not .1 ' '(the point can be after the number but not before.') logger.error(error) raise AphorismsToXMLException aphorism = self._aph_com[k][0] commentaries = self._aph_com[k][1:] # Add initial XML for the aphorism + commentary unit self.xml.append(self.xml_oss * self.xml_n_offset + '<div n="' + str(k) + '" type="aphorism_commentary_unit">') # Add initial XML for this aphorism self.xml.append(self.xml_oss * (self.xml_n_offset + 1) + '<div type="aphorism">') self.xml.append(self.xml_oss * (self.xml_n_offset + 2) + '<p>') # Now process any witnesses in it. If this fails with an # Exception print an error and return try: line_ref = references(aphorism) except AnalysisException: error = ('Unable to process references in ' 'aphorism {}'.format(k)) logger.error(error) raise AphorismsToXMLException from None if line_ref is None or line_ref == '': continue # Process any footnotes in line_ref, if there are errors write # to the log file and return try: self.xml_n_offset += 3 xml_main_to_add, self._next_footnote = \ footnotes(line_ref, self._next_footnote) self.xml_n_offset -= 3 except (TypeError, AnalysisException): error = ('Unable to process footnotes in ' 'aphorism {}'.format(k)) logger.error(error) raise AphorismsToXMLException from None # Add the XML self.xml.extend(xml_main_to_add) # Close the XML for the aphorism self.xml.append(self.xml_oss * (self.xml_n_offset + 1) + '</p>') self.xml.append(self.xml_oss * self.xml_n_offset + '</div>') # Get the next line of text for n_com, line in enumerate(commentaries): # Workaround footnote on first word line = ' ' + line if line[-1] != '.': debug = ('Commentaries should ended with a `.`\n' 'Warning in aphorism {}\n' 'commentary {}'.format(k, line)) logger.debug(debug) # Add initial XML for this aphorism's commentary self.xml.append(self.xml_oss * self.xml_n_offset + '<div type="commentary">') self.xml.append(self.xml_oss * (self.xml_n_offset + 1) + '<p>') # Now process any witnesses in this line. 
If this fails with a # CommentaryToEpidocException and log an error try: line_ref = references(line) except AnalysisException: error = ('Unable to process references, ' 'commentary {} for aphorism ' '{}'.format(n_com+1, k)) logger.error(error) raise AphorismsToXMLException from None # Process any _footnotes in line_ref. If this fails with a # CommentaryToEpidocException and log an error try: self.xml_n_offset += 3 xml_main_to_add, self._next_footnote = \ footnotes(line_ref, self._next_footnote) self.xml_n_offset -= 3 except (TypeError, AnalysisException): error = ('Unable to process footnote, ' 'commentary {} for aphorism ' '{}'.format(n_com+1, k)) logger.error(error) raise AphorismsToXMLException from None # Add the XML self.xml.extend(xml_main_to_add) # Close the XML for this commentary self.xml.append(self.xml_oss * (self.xml_n_offset + 1) + '</p>') self.xml.append(self.xml_oss * self.xml_n_offset + '</div>') # Close the XML for the aphorism + commentary unit self.xml.append(self.xml_oss * self.xml_n_offset + '</div>') logger.debug('Finish aphorisms and commentaries treatment') # Save the xmls created self._create_xml() self.save_xml(self.xml_file) self._validate_xml() logger.debug('Save main xml')
# Copyright (C) 2009 Duncan McGreggor <duncan@canonical.com>
# Licenced under the txaws licence available at /LICENSE in the txaws source.

# Unit tests for txaws.service: AWSServiceEndpoint URI handling and
# AWSServiceRegion client construction/caching.

from txaws.credentials import AWSCredentials
from txaws.ec2.client import EC2Client
try:
    from txaws.s3.client import S3Client
except ImportError:
    s3clientSkip = ("S3Client couldn't be imported (perhaps because epsilon, "
                    "on which it depends, isn't present)")
else:
    s3clientSkip = None
from txaws.service import (AWSServiceEndpoint, AWSServiceRegion,
                           EC2_ENDPOINT_EU, EC2_ENDPOINT_US, REGION_EU)
from txaws.testing.base import TXAWSTestCase


class AWSServiceEndpointTestCase(TXAWSTestCase):
    # Tests URI parsing, generation and mutators on AWSServiceEndpoint.

    def setUp(self):
        self.endpoint = AWSServiceEndpoint(uri="http://my.service/da_endpoint")

    def test_simple_creation(self):
        endpoint = AWSServiceEndpoint()
        self.assertEquals(endpoint.scheme, "http")
        self.assertEquals(endpoint.host, "")
        self.assertEquals(endpoint.port, None)
        self.assertEquals(endpoint.path, "/")
        self.assertEquals(endpoint.method, "GET")

    def test_custom_method(self):
        endpoint = AWSServiceEndpoint(
            uri="http://service/endpoint", method="PUT")
        self.assertEquals(endpoint.method, "PUT")

    def test_parse_uri(self):
        self.assertEquals(self.endpoint.scheme, "http")
        self.assertEquals(self.endpoint.host, "my.service")
        self.assertIdentical(self.endpoint.port, None)
        self.assertEquals(self.endpoint.path, "/da_endpoint")

    def test_parse_uri_https_and_custom_port(self):
        endpoint = AWSServiceEndpoint(uri="https://my.service:8080/endpoint")
        self.assertEquals(endpoint.scheme, "https")
        self.assertEquals(endpoint.host, "my.service")
        self.assertEquals(endpoint.port, 8080)
        self.assertEquals(endpoint.path, "/endpoint")

    def test_get_uri(self):
        uri = self.endpoint.get_uri()
        self.assertEquals(uri, "http://my.service/da_endpoint")

    def test_get_uri_custom_port(self):
        uri = "https://my.service:8080/endpoint"
        endpoint = AWSServiceEndpoint(uri=uri)
        new_uri = endpoint.get_uri()
        self.assertEquals(new_uri, uri)

    def test_set_host(self):
        self.assertEquals(self.endpoint.host, "my.service")
        self.endpoint.set_host("newhost.com")
        self.assertEquals(self.endpoint.host, "newhost.com")

    def test_get_host(self):
        self.assertEquals(self.endpoint.host, self.endpoint.get_host())

    def test_get_canonical_host(self):
        """
        If the port is not specified the canonical host is the same as
        the host.
        """
        uri = "http://my.service/endpoint"
        endpoint = AWSServiceEndpoint(uri=uri)
        self.assertEquals("my.service", endpoint.get_canonical_host())

    def test_get_canonical_host_with_non_default_port(self):
        """
        If the port is not the default, the canonical host includes it.
        """
        uri = "http://my.service:99/endpoint"
        endpoint = AWSServiceEndpoint(uri=uri)
        self.assertEquals("my.service:99", endpoint.get_canonical_host())

    def test_get_canonical_host_is_lower_case(self):
        """
        The canonical host is guaranteed to be lower case.
        """
        uri = "http://MY.SerVice:99/endpoint"
        endpoint = AWSServiceEndpoint(uri=uri)
        self.assertEquals("my.service:99", endpoint.get_canonical_host())

    def test_set_canonical_host(self):
        """
        The canonical host is converted to lower case.
        """
        endpoint = AWSServiceEndpoint()
        endpoint.set_canonical_host("My.Service")
        self.assertEquals("my.service", endpoint.host)
        self.assertIdentical(None, endpoint.port)

    def test_set_canonical_host_with_port(self):
        """
        The canonical host can optionally have a port.
        """
        endpoint = AWSServiceEndpoint()
        endpoint.set_canonical_host("my.service:99")
        self.assertEquals("my.service", endpoint.host)
        self.assertEquals(99, endpoint.port)

    def test_set_canonical_host_with_empty_port(self):
        """
        The canonical host can also have no port.
        """
        endpoint = AWSServiceEndpoint()
        endpoint.set_canonical_host("my.service:")
        self.assertEquals("my.service", endpoint.host)
        self.assertIdentical(None, endpoint.port)

    def test_set_path(self):
        self.endpoint.set_path("/newpath")
        self.assertEquals(
            self.endpoint.get_uri(), "http://my.service/newpath")

    def test_set_method(self):
        self.assertEquals(self.endpoint.method, "GET")
        self.endpoint.set_method("PUT")
        self.assertEquals(self.endpoint.method, "PUT")


class AWSServiceRegionTestCase(TXAWSTestCase):
    # Tests credential/endpoint wiring and the per-region client cache.

    def setUp(self):
        self.creds = AWSCredentials("foo", "bar")
        self.region = AWSServiceRegion(creds=self.creds)

    def test_simple_creation(self):
        self.assertEquals(self.creds, self.region.creds)
        self.assertEquals(self.region._clients, {})
        self.assertEquals(self.region.ec2_endpoint.get_uri(),
                          EC2_ENDPOINT_US)

    def test_creation_with_keys(self):
        region = AWSServiceRegion(access_key="baz", secret_key="quux")
        self.assertEquals(region.creds.access_key, "baz")
        self.assertEquals(region.creds.secret_key, "quux")

    def test_creation_with_keys_and_creds(self):
        """
        creds take precedence over individual access key/secret key pairs.
        """
        region = AWSServiceRegion(self.creds, access_key="baz",
                                  secret_key="quux")
        self.assertEquals(region.creds.access_key, "foo")
        self.assertEquals(region.creds.secret_key, "bar")

    def test_creation_with_uri(self):
        region = AWSServiceRegion(
            creds=self.creds, ec2_uri="http://foo/bar")
        self.assertEquals(region.ec2_endpoint.get_uri(), "http://foo/bar")

    def test_creation_with_uri_backwards_compatible(self):
        # The old "uri" keyword should behave like "ec2_uri".
        region = AWSServiceRegion(
            creds=self.creds, uri="http://foo/bar")
        self.assertEquals(region.ec2_endpoint.get_uri(), "http://foo/bar")

    def test_creation_with_uri_and_region(self):
        # An explicit URI wins over a region name.
        region = AWSServiceRegion(
            creds=self.creds, region=REGION_EU, ec2_uri="http://foo/bar")
        self.assertEquals(region.ec2_endpoint.get_uri(), "http://foo/bar")

    def test_creation_with_region_override(self):
        region = AWSServiceRegion(creds=self.creds, region=REGION_EU)
        self.assertEquals(region.ec2_endpoint.get_uri(), EC2_ENDPOINT_EU)

    def test_get_ec2_client_with_empty_cache(self):
        # Cache keys combine client class, creds and endpoint.
        key = str(EC2Client) + str(self.creds) + str(self.region.ec2_endpoint)
        original_client = self.region._clients.get(key)
        new_client = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint)
        self.assertEquals(original_client, None)
        self.assertTrue(isinstance(new_client, EC2Client))
        self.assertNotEquals(original_client, new_client)

    def test_get_ec2_client_from_cache_default(self):
        client1 = self.region.get_ec2_client()
        client2 = self.region.get_ec2_client()
        self.assertTrue(isinstance(client1, EC2Client))
        self.assertTrue(isinstance(client2, EC2Client))
        self.assertEquals(client1, client2)

    def test_get_ec2_client_from_cache(self):
        client1 = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint)
        client2 = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint)
        self.assertTrue(isinstance(client1, EC2Client))
        self.assertTrue(isinstance(client2, EC2Client))
        self.assertEquals(client1, client2)

    def test_get_ec2_client_from_cache_with_purge(self):
        # purge_cache=True must force creation of a fresh client.
        client1 = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint,
            purge_cache=True)
        client2 = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint,
            purge_cache=True)
        self.assertTrue(isinstance(client1, EC2Client))
        self.assertTrue(isinstance(client2, EC2Client))
        self.assertNotEquals(client1, client2)

    def test_get_s3_client_with_empty_cache(self):
        key = str(S3Client) + str(self.creds) + str(self.region.s3_endpoint)
        original_client = self.region._clients.get(key)
        new_client = self.region.get_client(
            S3Client, creds=self.creds, endpoint=self.region.s3_endpoint)
        self.assertEquals(original_client, None)
        self.assertTrue(isinstance(new_client, S3Client))
        self.assertNotEquals(original_client, new_client)
    # Skip the S3 test when S3Client could not be imported (see top of file).
    test_get_s3_client_with_empty_cache.skip = s3clientSkip
# coding: utf-8

"""
Copyright 2016 SmartBear Software

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Ref: https://github.com/swagger-api/swagger-codegen
"""

from pprint import pformat


class GenericQuestion(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Fixes over the raw generated code:
      * the duplicate ``'id'`` entry in ``swagger_types`` and the duplicate
        ``self._id = None`` / ``id`` property definitions were removed (the
        second copy silently overwrote the first and changed nothing);
      * ``__eq__`` no longer raises ``AttributeError`` when compared against
        an object without a ``__dict__`` (e.g. a string or int);
      * the third-party ``six.iteritems`` helper was replaced with the
        built-in ``dict.items()``, which is equivalent here.
    """

    def __init__(self):
        """
        GenericQuestion - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'owner_id': 'str',
            'created_at': 'datetime',
            'question_text': 'str',
            'type': 'str',
            'classification': 'Classification',
            'id': 'str',
            'v': 'float',
            'potential_answers': 'list[str]'
        }

        self.attribute_map = {
            'owner_id': '_ownerId',
            'created_at': '_createdAt',
            'question_text': 'questionText',
            'type': 'type',
            'classification': 'classification',
            'id': '_id',
            'v': '__v',
            'potential_answers': 'potentialAnswers'
        }

        self._owner_id = None
        self._created_at = None
        self._question_text = None
        self._type = None
        self._classification = None
        self._id = None
        self._v = None
        self._potential_answers = None

    @property
    def owner_id(self):
        """
        Gets the owner_id of this GenericQuestion.

        :return: The owner_id of this GenericQuestion.
        :rtype: str
        """
        return self._owner_id

    @owner_id.setter
    def owner_id(self, owner_id):
        """
        Sets the owner_id of this GenericQuestion.

        :param owner_id: The owner_id of this GenericQuestion.
        :type: str
        """
        self._owner_id = owner_id

    @property
    def created_at(self):
        """
        Gets the created_at of this GenericQuestion.

        :return: The created_at of this GenericQuestion.
        :rtype: datetime
        """
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """
        Sets the created_at of this GenericQuestion.

        :param created_at: The created_at of this GenericQuestion.
        :type: datetime
        """
        self._created_at = created_at

    @property
    def question_text(self):
        """
        Gets the question_text of this GenericQuestion.

        :return: The question_text of this GenericQuestion.
        :rtype: str
        """
        return self._question_text

    @question_text.setter
    def question_text(self, question_text):
        """
        Sets the question_text of this GenericQuestion.

        :param question_text: The question_text of this GenericQuestion.
        :type: str
        """
        self._question_text = question_text

    @property
    def type(self):
        """
        Gets the type of this GenericQuestion.

        :return: The type of this GenericQuestion.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this GenericQuestion.

        :param type: The type of this GenericQuestion.
        :type: str
        """
        self._type = type

    @property
    def classification(self):
        """
        Gets the classification of this GenericQuestion.

        :return: The classification of this GenericQuestion.
        :rtype: Classification
        """
        return self._classification

    @classification.setter
    def classification(self, classification):
        """
        Sets the classification of this GenericQuestion.

        :param classification: The classification of this GenericQuestion.
        :type: Classification
        """
        self._classification = classification

    @property
    def id(self):
        """
        Gets the id of this GenericQuestion.

        :return: The id of this GenericQuestion.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this GenericQuestion.

        :param id: The id of this GenericQuestion.
        :type: str
        """
        self._id = id

    @property
    def v(self):
        """
        Gets the v of this GenericQuestion.

        :return: The v of this GenericQuestion.
        :rtype: float
        """
        return self._v

    @v.setter
    def v(self, v):
        """
        Sets the v of this GenericQuestion.

        :param v: The v of this GenericQuestion.
        :type: float
        """
        self._v = v

    @property
    def potential_answers(self):
        """
        Gets the potential_answers of this GenericQuestion.

        :return: The potential_answers of this GenericQuestion.
        :rtype: list[str]
        """
        return self._potential_answers

    @potential_answers.setter
    def potential_answers(self, potential_answers):
        """
        Sets the potential_answers of this GenericQuestion.

        :param potential_answers: The potential_answers of this GenericQuestion.
        :type: list[str]
        """
        self._potential_answers = potential_answers

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting any
        nested model objects (anything exposing ``to_dict``) found in list,
        dict, or scalar attribute values.
        """
        result = {}

        # ``dict.items()`` replaces ``six.iteritems``: both yield the same
        # (key, value) pairs on Python 2 and 3 for this read-only traversal.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against unrelated types: the generated code unconditionally
        # accessed ``other.__dict__`` and raised AttributeError for e.g.
        # strings or ints.  Returning NotImplemented lets Python fall back
        # to its default (identity) comparison.
        if not isinstance(other, GenericQuestion):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from typing import Any, Callable, List, Optional, Type, TYPE_CHECKING, cast

import cProfile
import pstats
import os
import atexit
import sys

from pyspark.accumulators import AccumulatorParam

if TYPE_CHECKING:
    from pyspark.context import SparkContext


class ProfilerCollector:
    """
    This class keeps track of different profilers on a per
    stage/UDF basis. Also this is used to create new profilers for
    the different stages/UDFs.
    """

    def __init__(
        self,
        profiler_cls: Type["Profiler"],
        udf_profiler_cls: Type["Profiler"],
        dump_path: Optional[str] = None,
    ):
        self.profiler_cls: Type[Profiler] = profiler_cls
        self.udf_profiler_cls: Type[Profiler] = udf_profiler_cls
        self.profile_dump_path: Optional[str] = dump_path
        # Each entry is a mutable [id, profiler, shown] triple; ``shown``
        # records whether show_profiles() has already printed that profile.
        self.profilers: List[List[Any]] = []

    def new_profiler(self, ctx: "SparkContext") -> "Profiler":
        """Create a new profiler using class `profiler_cls`"""
        return self.profiler_cls(ctx)

    def new_udf_profiler(self, ctx: "SparkContext") -> "Profiler":
        """Create a new profiler using class `udf_profiler_cls`"""
        return self.udf_profiler_cls(ctx)

    def add_profiler(self, id: int, profiler: "Profiler") -> None:
        """Add a profiler for RDD/UDF `id`"""
        # On the first profiler ever registered, install an atexit hook so
        # the collected profiles are dumped to disk (when a dump path is
        # configured) or printed to stdout when the driver process exits.
        if not self.profilers:
            if self.profile_dump_path:
                atexit.register(self.dump_profiles, self.profile_dump_path)
            else:
                atexit.register(self.show_profiles)

        self.profilers.append([id, profiler, False])

    def dump_profiles(self, path: str) -> None:
        """Dump the profile stats into directory `path`"""
        for id, profiler, _ in self.profilers:
            profiler.dump(id, path)
        # Dumped profilers are discarded so a later call starts fresh.
        self.profilers = []

    def show_profiles(self) -> None:
        """Print the profile stats to stdout"""
        # Only profiles not yet shown are printed; entries whose profiler is
        # falsy are skipped.
        for i, (id, profiler, showed) in enumerate(self.profilers):
            if not showed and profiler:
                profiler.show(id)
                # mark it as showed
                self.profilers[i][2] = True


class Profiler:
    """
    PySpark supports custom profilers, this is to allow for different profilers to
    be used as well as outputting to different formats than what is provided in the
    BasicProfiler.

    A custom profiler has to define or inherit the following methods:
        profile - will produce a system profile of some sort.
        stats - return the collected stats.
        dump - dumps the profiles to a path
        add - adds a profile to the existing accumulated profile

    The profiler class is chosen when creating a SparkContext

    Examples
    --------
    >>> from pyspark import SparkConf, SparkContext
    >>> from pyspark import BasicProfiler
    >>> class MyCustomProfiler(BasicProfiler):
    ...     def show(self, id):
    ...         print("My custom profiles for RDD:%s" % id)
    ...
    >>> conf = SparkConf().set("spark.python.profile", "true")
    >>> sc = SparkContext('local', 'test', conf=conf, profiler_cls=MyCustomProfiler)
    >>> sc.parallelize(range(1000)).map(lambda x: 2 * x).take(10)
    [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
    >>> sc.parallelize(range(1000)).count()
    1000
    >>> sc.show_profiles()
    My custom profiles for RDD:1
    My custom profiles for RDD:3
    >>> sc.stop()

    Notes
    -----
    This API is a developer API.
    """

    def __init__(self, ctx: "SparkContext") -> None:
        pass

    def profile(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
        """Do profiling on the function `func`"""
        raise NotImplementedError

    def stats(self) -> pstats.Stats:
        """Return the collected profiling stats (pstats.Stats)"""
        raise NotImplementedError

    def show(self, id: int) -> None:
        """Print the profile stats to stdout, id is the RDD id"""
        stats = self.stats()
        if stats:
            print("=" * 60)
            print("Profile of RDD<id=%d>" % id)
            print("=" * 60)
            stats.sort_stats("time", "cumulative").print_stats()

    def dump(self, id: int, path: str) -> None:
        """Dump the profile into path, id is the RDD id"""
        if not os.path.exists(path):
            os.makedirs(path)
        stats = self.stats()
        if stats:
            p = os.path.join(path, "rdd_%d.pstats" % id)
            stats.dump_stats(p)


class PStatsParam(AccumulatorParam[Optional[pstats.Stats]]):
    """PStatsParam is used to merge pstats.Stats"""

    @staticmethod
    def zero(value: Optional[pstats.Stats]) -> None:
        # The accumulator's identity element: no stats collected yet.
        return None

    @staticmethod
    def addInPlace(
        value1: Optional[pstats.Stats], value2: Optional[pstats.Stats]
    ) -> Optional[pstats.Stats]:
        # Merge the per-partition stats objects; pstats.Stats.add folds
        # value2's timing data into value1 in place.
        if value1 is None:
            return value2
        value1.add(value2)
        return value1


class BasicProfiler(Profiler):
    """
    BasicProfiler is the default profiler, which is implemented based on
    cProfile and Accumulator
    """

    def __init__(self, ctx: "SparkContext") -> None:
        Profiler.__init__(self, ctx)
        # Creates a new accumulator for combining the profiles of different
        # partitions of a stage
        self._accumulator = ctx.accumulator(None, PStatsParam)  # type: ignore[arg-type]

    def profile(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
        """Runs and profiles the method to_profile passed in. A profile object is returned."""
        pr = cProfile.Profile()
        ret = pr.runcall(func, *args, **kwargs)
        st = pstats.Stats(pr)
        # The stream attribute holds a file object, which cannot be pickled
        # for shipping back to the driver; clear it before accumulating.
        st.stream = None  # type: ignore[attr-defined]  # make it picklable
        st.strip_dirs()

        # Adds a new profile to the existing accumulated value
        self._accumulator.add(st)  # type: ignore[arg-type]

        return ret

    def stats(self) -> pstats.Stats:
        return cast(pstats.Stats, self._accumulator.value)


class UDFBasicProfiler(BasicProfiler):
    """
    UDFBasicProfiler is the profiler for Python/Pandas UDFs.
    """

    def show(self, id: int) -> None:
        """Print the profile stats to stdout, id is the PythonUDF id"""
        stats = self.stats()
        if stats:
            print("=" * 60)
            print("Profile of UDF<id=%d>" % id)
            print("=" * 60)
            stats.sort_stats("time", "cumulative").print_stats()

    def dump(self, id: int, path: str) -> None:
        """Dump the profile into path, id is the PythonUDF id"""
        if not os.path.exists(path):
            os.makedirs(path)
        stats = self.stats()
        if stats:
            p = os.path.join(path, "udf_%d.pstats" % id)
            stats.dump_stats(p)


if __name__ == "__main__":
    import doctest

    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        sys.exit(-1)
# coding: utf-8 # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for modules/dashboard/analytics.""" __author__ = 'Julia Oh(juliaoh@google.com)' import datetime import os import time import cloudstorage import actions from actions import assert_contains from actions import assert_does_not_contain from actions import assert_equals from pipeline import models as pipeline_models from pipeline import pipeline import appengine_config from common import utils as common_utils from controllers import sites from controllers import utils from models import config from models import courses from models import entities from models import jobs from models import models from models import transforms from models.progress import ProgressStats from models.progress import UnitLessonCompletionTracker from modules.analytics import rest_providers from modules.analytics import synchronous_providers from modules.mapreduce import mapreduce_module from google.appengine.ext import db class AnalyticsTabsWithNoJobs(actions.TestBase): def tearDown(self): config.Registry.test_overrides.clear() def test_blank_students_tab_no_mr(self): email = 'admin@google.com' actions.login(email, is_admin=True) self.get('dashboard?action=analytics_students') def test_blank_questions_tab_no_mr(self): email = 'admin@google.com' actions.login(email, is_admin=True) self.get('dashboard?action=analytics_questions') def test_blank_assessments_tab_no_mr(self): email = 
'admin@google.com' actions.login(email, is_admin=True) self.get('dashboard?action=analytics_assessments') def test_blank_peer_review_tab_no_mr(self): email = 'admin@google.com' actions.login(email, is_admin=True) self.get('dashboard?action=analytics_peer_review') def test_blank_students_tab_with_mr(self): config.Registry.test_overrides[ mapreduce_module.GCB_ENABLE_MAPREDUCE_DETAIL_ACCESS.name] = True email = 'admin@google.com' actions.login(email, is_admin=True) self.get('dashboard?action=analytics_students') def test_blank_questions_tab_with_mr(self): config.Registry.test_overrides[ mapreduce_module.GCB_ENABLE_MAPREDUCE_DETAIL_ACCESS.name] = True email = 'admin@google.com' actions.login(email, is_admin=True) self.get('dashboard?action=analytics_questions') def test_blank_assessments_tab_with_mr(self): config.Registry.test_overrides[ mapreduce_module.GCB_ENABLE_MAPREDUCE_DETAIL_ACCESS.name] = True email = 'admin@google.com' actions.login(email, is_admin=True) self.get('dashboard?action=analytics_assessments') def test_blank_peer_review_tab_with_mr(self): config.Registry.test_overrides[ mapreduce_module.GCB_ENABLE_MAPREDUCE_DETAIL_ACCESS.name] = True email = 'admin@google.com' actions.login(email, is_admin=True) self.get('dashboard?action=analytics_peer_review') class ProgressAnalyticsTest(actions.TestBase): """Tests the progress analytics page on the Course Author dashboard.""" EXPECTED_TASK_COUNT = 3 def enable_progress_tracking(self): config.Registry.test_overrides[ utils.CAN_PERSIST_ACTIVITY_EVENTS.name] = True def test_empty_student_progress_stats_analytics_displays_nothing(self): """Test analytics page on course dashboard when no progress stats.""" # The admin looks at the analytics page on the board to check right # message when no progress has been recorded. 
email = 'admin@google.com' actions.login(email, is_admin=True) response = self.get('dashboard?action=analytics_students') assert_contains( 'Google &gt; Dashboard &gt; Manage &gt; Students', response.body) assert_contains('have not been calculated yet', response.body) response = response.forms[ 'gcb-generate-analytics-data'].submit().follow() assert len(self.taskq.GetTasks('default')) == ( ProgressAnalyticsTest.EXPECTED_TASK_COUNT) assert_contains('is running', response.body) self.execute_all_deferred_tasks() response = self.get(response.request.url) assert_contains('were last updated at', response.body) assert_contains('currently enrolled: 0', response.body) assert_contains('total: 0', response.body) assert_contains('Student Progress', response.body) assert_contains( 'No student progress has been recorded for this course.', response.body) actions.logout() def test_student_progress_stats_analytics_displays_on_dashboard(self): """Test analytics page on course dashboard.""" self.enable_progress_tracking() student1 = 'student1@google.com' name1 = 'Test Student 1' student2 = 'student2@google.com' name2 = 'Test Student 2' # Student 1 completes a unit. actions.login(student1) actions.register(self, name1) actions.view_unit(self) actions.logout() # Student 2 completes a unit. actions.login(student2) actions.register(self, name2) actions.view_unit(self) actions.logout() # Admin logs back in and checks if progress exists. 
email = 'admin@google.com' actions.login(email, is_admin=True) response = self.get('dashboard?action=analytics_students') assert_contains( 'Google &gt; Dashboard &gt; Manage &gt; Students', response.body) assert_contains('have not been calculated yet', response.body) response = response.forms[ 'gcb-generate-analytics-data'].submit().follow() assert len(self.taskq.GetTasks('default')) == ( ProgressAnalyticsTest.EXPECTED_TASK_COUNT) response = self.get('dashboard?action=analytics_students') assert_contains('is running', response.body) self.execute_all_deferred_tasks() response = self.get('dashboard?action=analytics_students') assert_contains('were last updated at', response.body) assert_contains('currently enrolled: 2', response.body) assert_contains('total: 2', response.body) assert_contains('Student Progress', response.body) assert_does_not_contain( 'No student progress has been recorded for this course.', response.body) # JSON code for the completion statistics. assert_contains( '\\"u.1.l.1\\": {\\"progress\\": 0, \\"completed\\": 2}', response.body) assert_contains( '\\"u.1\\": {\\"progress\\": 2, \\"completed\\": 0}', response.body) def test_analytics_are_individually_cancelable_and_runnable(self): """Test run/cancel controls for individual analytics jobs.""" # Submit all analytics. email = 'admin@google.com' actions.login(email, is_admin=True) response = self.get('dashboard?action=analytics_peer_review') response = response.forms[ 'gcb-generate-analytics-data'].submit().follow() # Ensure that analytics appear to be running and have cancel buttons. assert_contains('is running', response.body) assert_contains('Cancel', response.body) # Now that all analytics are pending, ensure that we do _not_ have # an update-all button. with self.assertRaises(KeyError): response = response.forms['gcb-generate-analytics-data'] # Click the cancel button for one of the slower jobs. 
response = response.forms[ 'gcb-cancel-visualization-peer_review'].submit().follow() # Verify that page shows job was canceled. assert_contains('error updating peer review statistics', response.body) assert_contains('Canceled by ' + email, response.body) # We should now have our update-statistics button back. self.assertIsNotNone(response.forms['gcb-generate-analytics-data']) # Should also have a button to run the canceled job; click that. response = response.forms[ 'gcb-run-visualization-peer_review'].submit().follow() # All jobs should now again be running, and update-all button gone. with self.assertRaises(KeyError): response = response.forms['gcb-generate-analytics-data'] def test_cancel_map_reduce(self): email = 'admin@google.com' actions.login(email, is_admin=True) response = self.get('dashboard?action=analytics_peer_review') response = response.forms[ 'gcb-run-visualization-peer_review'].submit().follow() # Launch 1st stage of map/reduce job; we must do this in order to # get the pipeline woken up enough to have built a root pipeline # record. Without this, we do not have an ID to use when canceling. self.execute_all_deferred_tasks(iteration_limit=1) # Cancel the job. response = response.forms[ 'gcb-cancel-visualization-peer_review'].submit().follow() assert_contains('Canceled by ' + email, response.body) # Now permit any pending tasks to complete, and expect the job's # status message to remain at "Canceled by ...". # # If the cancel didn't take effect, the map/reduce should have run to # completion and the job's status would change to completed, changing # the message. 
This is verified in # model_jobs.JobOperationsTest.test_killed_job_can_still_complete self.execute_all_deferred_tasks() response = self.get(response.request.url) assert_contains('Canceled by ' + email, response.body) def test_get_entity_id_wrapper_in_progress_works(self): """Tests get_entity_id wrappers in progress.ProgressStats.""" sites.setup_courses('course:/test::ns_test, course:/:/') course = courses.Course(None, app_context=sites.get_all_courses()[0]) progress_stats = ProgressStats(course) unit1 = course.add_unit() assert_equals( progress_stats._get_unit_ids_of_type_unit(), [unit1.unit_id]) assessment1 = course.add_assessment() assert_equals( progress_stats._get_assessment_ids(), [assessment1.unit_id]) lesson11 = course.add_lesson(unit1) lesson12 = course.add_lesson(unit1) assert_equals( progress_stats._get_lesson_ids(unit1.unit_id), [lesson11.lesson_id, lesson12.lesson_id]) lesson11.has_activity = True course.set_activity_content(lesson11, u'var activity=[]', []) assert_equals( progress_stats._get_activity_ids(unit1.unit_id, lesson11.lesson_id), [0]) assert_equals( progress_stats._get_activity_ids(unit1.unit_id, lesson12.lesson_id), []) def test_get_entity_label_wrapper_in_progress_works(self): """Tests get_entity_label wrappers in progress.ProgressStats.""" sites.setup_courses('course:/test::ns_test, course:/:/') course = courses.Course(None, app_context=sites.get_all_courses()[0]) progress_stats = ProgressStats(course) unit1 = course.add_unit() assert_equals( progress_stats._get_unit_label(unit1.unit_id), 'Unit %s' % unit1.index) assessment1 = course.add_assessment() assert_equals( progress_stats._get_assessment_label(assessment1.unit_id), assessment1.title) lesson11 = course.add_lesson(unit1) lesson12 = course.add_lesson(unit1) assert_equals( progress_stats._get_lesson_label(unit1.unit_id, lesson11.lesson_id), lesson11.index) lesson11.has_activity = True course.set_activity_content(lesson11, u'var activity=[]', []) assert_equals( 
progress_stats._get_activity_label( unit1.unit_id, lesson11.lesson_id, 0), 'L1.1') assert_equals( progress_stats._get_activity_label( unit1.unit_id, lesson12.lesson_id, 0), 'L1.2') lesson12.objectives = """ <question quid="123" weight="1" instanceid=1></question> random_text <gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube> more_random_text <question-group qgid="456" instanceid=2></question-group> yet_more_random_text """ cpt_ids = progress_stats._get_component_ids( unit1.unit_id, lesson12.lesson_id, 0) self.assertEqual(set([u'1', u'2']), set(cpt_ids)) def test_compute_entity_dict_constructs_dict_correctly(self): sites.setup_courses('course:/test::ns_test, course:/:/') course = courses.Course(None, app_context=sites.get_all_courses()[0]) progress_stats = ProgressStats(course) course_dict = progress_stats.compute_entity_dict('course', []) assert_equals(course_dict, { 'label': 'UNTITLED COURSE', 'u': {}, 's': {}}) def test_compute_entity_dict_constructs_dict_for_empty_course_correctly( self): """Tests correct entity_structure is built.""" sites.setup_courses('course:/test::ns_test, course:/:/') course = courses.Course(None, app_context=sites.get_all_courses()[0]) unit1 = course.add_unit() assessment1 = course.add_assessment() progress_stats = ProgressStats(course) assert_equals( progress_stats.compute_entity_dict('course', []), {'label': 'UNTITLED COURSE', 'u': {unit1.unit_id: { 'label': 'Unit %s' % unit1.index, 'l': {}, 's': {}}}, 's': { assessment1.unit_id: {'label': assessment1.title}}}) lesson11 = course.add_lesson(unit1) assert_equals( progress_stats.compute_entity_dict('course', []), { "s": { assessment1.unit_id: { "label": assessment1.title } }, "u": { unit1.unit_id: { "s": {}, "l": { lesson11.lesson_id: { "a": {}, "h": { 0: { "c": {}, "label": "L1.1" } }, "label": lesson11.index } }, "label": "Unit %s" % unit1.index } }, 'label': 'UNTITLED COURSE' }) lesson11.objectives = """ <question quid="123" weight="1" instanceid="1"></question> 
random_text <gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube> more_random_text <question-group qgid="456" instanceid="2"></question-group> yet_more_random_text """ assert_equals( progress_stats.compute_entity_dict('course', []), { "s": { assessment1.unit_id: { "label": assessment1.title } }, "u": { unit1.unit_id: { "s": {}, "l": { lesson11.lesson_id: { "a": {}, "h": { 0: { "c": { u'1': { "label": "L1.1.1" }, u'2': { "label": "L1.1.2" } }, "label": "L1.1" } }, "label": lesson11.index } }, "label": "Unit %s" % unit1.index } }, "label": 'UNTITLED COURSE' }) def test_entity_dict_for_pre_post_assessment(self): """Tests correct entity_structure is built.""" sites.setup_courses('course:/test::ns_test, course:/:/') course = courses.Course(None, app_context=sites.get_all_courses()[0]) unit1 = course.add_unit() pre_assessment = course.add_assessment() pre_assessment.title = 'Pre Assessment' post_assessment = course.add_assessment() post_assessment.title = 'Post Assessment' # Neither pre nor post assessment for unit unit1.pre_assessment = None unit1.post_assessment = None progress_stats = ProgressStats(course) assert_equals( progress_stats.compute_entity_dict('course', []), {'s': { pre_assessment.unit_id: {'label': 'Pre Assessment'}, post_assessment.unit_id: {'label': 'Post Assessment'}}, 'u': {unit1.unit_id: { 's': {}, 'l': {}, 'label': 'Unit 1'}}, 'label': 'UNTITLED COURSE'}) # Only pre unit1.pre_assessment = pre_assessment.unit_id unit1.post_assessment = None progress_stats = ProgressStats(course) assert_equals( progress_stats.compute_entity_dict('course', []), {'s': {post_assessment.unit_id: {'label': 'Post Assessment'}}, 'u': {unit1.unit_id: { 's': {pre_assessment.unit_id: {'label': 'Pre Assessment'}}, 'l': {}, 'label': 'Unit 1'}}, 'label': 'UNTITLED COURSE'}) # Only post unit1.pre_assessment = None unit1.post_assessment = post_assessment.unit_id progress_stats = ProgressStats(course) assert_equals( progress_stats.compute_entity_dict('course', []), {'s': 
{pre_assessment.unit_id: {'label': 'Pre Assessment'}}, 'u': {unit1.unit_id: { 's': {post_assessment.unit_id: {'label': 'Post Assessment'}}, 'l': {}, 'label': 'Unit 1'}}, 'label': 'UNTITLED COURSE'}) # Pre and post assessment set. unit1.pre_assessment = pre_assessment.unit_id unit1.post_assessment = post_assessment.unit_id progress_stats = ProgressStats(course) assert_equals( progress_stats.compute_entity_dict('course', []), {'s': {}, 'u': {unit1.unit_id: { 's': { pre_assessment.unit_id: {'label': 'Pre Assessment'}, post_assessment.unit_id: {'label': 'Post Assessment'}}, 'l': {}, 'label': 'Unit 1'}}, 'label': 'UNTITLED COURSE'}) class QuestionAnalyticsTest(actions.TestBase): """Tests the question analytics page from Course Author dashboard.""" def _enable_activity_tracking(self): config.Registry.test_overrides[ utils.CAN_PERSIST_ACTIVITY_EVENTS.name] = True def _get_sample_v15_course(self): """Creates a course with different types of questions and returns it.""" sites.setup_courses('course:/test::ns_test, course:/:/') course = courses.Course(None, app_context=sites.get_all_courses()[0]) unit1 = course.add_unit() lesson1 = course.add_lesson(unit1) assessment_old = course.add_assessment() assessment_old.title = 'Old assessment' assessment_new = course.add_assessment() assessment_new.title = 'New assessment' assessment_peer = course.add_assessment() assessment_peer.title = 'Peer review assessment' # Create a multiple choice question. mcq_new_dict = { 'description': 'mcq_new', 'type': 0, # Multiple choice question. 'choices': [{ 'text': 'answer', 'score': 1.0 }], 'version': '1.5' } mcq_new_dto = models.QuestionDTO(None, mcq_new_dict) mcq_new_id = models.QuestionDAO.save(mcq_new_dto) # Create a short answer question. 
frq_new_dict = { 'defaultFeedback': '', 'rows': 1, 'description': 'short answer', 'hint': '', 'graders': [{ 'matcher': 'case_insensitive', 'score': '1.0', 'response': 'hi', 'feedback': '' }], 'question': 'short answer question', 'version': '1.5', 'type': 1, # Short answer question. 'columns': 100 } frq_new_dto = models.QuestionDTO(None, frq_new_dict) frq_new_id = models.QuestionDAO.save(frq_new_dto) # Create a question group. question_group_dict = { 'description': 'question_group', 'items': [ {'question': str(mcq_new_id)}, {'question': str(frq_new_id)}, {'question': str(mcq_new_id)} ], 'version': '1.5', 'introduction': '' } question_group_dto = models.QuestionGroupDTO(None, question_group_dict) question_group_id = models.QuestionGroupDAO.save(question_group_dto) # Add a MC question and a question group to leesson1. lesson1.objectives = """ <question quid="%s" weight="1" instanceid="QN"></question> random_text <gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube> more_random_text <question-group qgid="%s" instanceid="QG"></question-group> """ % (mcq_new_id, question_group_id) # Add a MC question, a short answer question, and a question group to # new style assessment. 
assessment_new.html_content = """ <question quid="%s" weight="1" instanceid="QN2"></question> <question quid="%s" weight="1" instanceid="FRQ2"></question> random_text <gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube> more_random_text <question-group qgid="%s" instanceid="QG2"></question-group> """ % (mcq_new_id, frq_new_id, question_group_id) return course def test_get_summarized_question_list_from_event(self): """Tests the transform functions per event type.""" sites.setup_courses('course:/test::ns_test, course:/:/') course = courses.Course(None, app_context=sites.get_all_courses()[0]) question_aggregator = (synchronous_providers.QuestionStatsGenerator .MultipleChoiceQuestionAggregator(course)) event_payloads = open(os.path.join( appengine_config.BUNDLE_ROOT, 'tests/unit/common/event_payloads.json')).read() event_payload_dict = transforms.loads(event_payloads) for event_info in event_payload_dict.values(): questions = question_aggregator._process_event( event_info['event_source'], event_info['event_data']) assert_equals(questions, event_info['transformed_dict_list']) def test_compute_question_stats_on_empty_course_returns_empty_dicts(self): sites.setup_courses('course:/test::ns_test, course:/:/') app_context = sites.get_all_courses()[0] question_stats_computer = ( synchronous_providers.QuestionStatsGenerator(app_context)) id_to_questions, id_to_assessments = question_stats_computer.run() assert_equals({}, id_to_questions) assert_equals({}, id_to_assessments) def test_id_to_question_dict_constructed_correctly(self): """Tests id_to_question dicts are constructed correctly.""" course = self._get_sample_v15_course() tracker = UnitLessonCompletionTracker(course) assert_equals( tracker.get_id_to_questions_dict(), { 'u.1.l.2.c.QN': { 'answer_counts': [0], 'label': 'Unit 1 Lesson 1, Question mcq_new', 'location': 'unit?unit=1&lesson=2', 'num_attempts': 0, 'score': 0 }, 'u.1.l.2.c.QG.i.0': { 'answer_counts': [0], 'label': ('Unit 1 Lesson 1, Question Group 
question_group ' 'Question mcq_new'), 'location': 'unit?unit=1&lesson=2', 'num_attempts': 0, 'score': 0 }, 'u.1.l.2.c.QG.i.2': { 'answer_counts': [0], 'label': ('Unit 1 Lesson 1, Question Group question_group ' 'Question mcq_new'), 'location': 'unit?unit=1&lesson=2', 'num_attempts': 0, 'score': 0 } } ) assert_equals( tracker.get_id_to_assessments_dict(), { 's.4.c.QN2': { 'answer_counts': [0], 'label': 'New assessment, Question mcq_new', 'location': 'assessment?name=4', 'num_attempts': 0, 'score': 0 }, 's.4.c.QG2.i.0': { 'answer_counts': [0], 'label': ('New assessment, Question Group question_group ' 'Question mcq_new'), 'location': 'assessment?name=4', 'num_attempts': 0, 'score': 0 }, 's.4.c.QG2.i.2': { 'answer_counts': [0], 'label': ('New assessment, Question Group question_group ' 'Question mcq_new'), 'location': 'assessment?name=4', 'num_attempts': 0, 'score': 0 } } ) COURSE_ONE = 'course_one' COURSE_TWO = 'course_two' class CronCleanupTest(actions.TestBase): def setUp(self): super(CronCleanupTest, self).setUp() admin_email = 'admin@foo.com' self.course_one = actions.simple_add_course( COURSE_ONE, admin_email, 'Course One') self.course_two = actions.simple_add_course( COURSE_TWO, admin_email, 'Course Two') actions.login(admin_email, True) actions.register(self, admin_email, COURSE_ONE) actions.register(self, admin_email, COURSE_TWO) self.save_tz = os.environ.get('TZ') os.environ['TZ'] = 'GMT' time.tzset() def tearDown(self): if self.save_tz: os.environ['TZ'] = self.save_tz else: del os.environ['TZ'] time.tzset() def _clean_jobs(self, max_age): return mapreduce_module.CronMapreduceCleanupHandler._clean_mapreduce( max_age) def _get_num_root_jobs(self, course_name): with common_utils.Namespace('ns_' + course_name): return len(pipeline.get_root_list()['pipelines']) def _get_cloudstore_paths(self, course_name): ret = set() with common_utils.Namespace('ns_' + course_name): for state in pipeline.get_root_list()['pipelines']: root_key = db.Key.from_path( 
pipeline_models._PipelineRecord.kind(), state['pipelineId']) paths = (mapreduce_module.CronMapreduceCleanupHandler ._collect_cloudstore_paths(root_key)) ret = ret.union(paths) return ret def _assert_cloudstore_paths_removed(self, course_name, paths): with common_utils.Namespace('ns_' + course_name): for path in paths: with self.assertRaises(cloudstorage.NotFoundError): cloudstorage.open(path) def _force_finalize(self, job): # For reasons that I do not grok, running the deferred task list # until it empties out in test mode does not wind up marking the # root job as 'done'. (Whereas when running the actual service, # the job does get marked 'done'.) This has already cost me most # of two hours of debugging, and I'm no closer to figuring out why, # much less having a monkey-patch into the Map/Reduce or Pipeline # libraries that would correct this. Cleaner to just transition # the job into a completed state manually. root_pipeline_id = jobs.MapReduceJob.get_root_pipeline_id(job.load()) with common_utils.Namespace(job._namespace): p = pipeline.Pipeline.from_id(root_pipeline_id) context = pipeline._PipelineContext('', 'default', '') context.transition_complete(p._pipeline_key) def test_non_admin_cannot_cleanup(self): actions.login('joe_user@foo.com') response = self.get('/cron/mapreduce/cleanup', expect_errors=True) self.assertEquals(400, response.status_int) def test_admin_cleanup_gets_200_ok(self): response = self.get('/cron/mapreduce/cleanup', expect_errors=True, headers={'X-AppEngine-Cron': 'True'}) self.assertEquals(200, response.status_int) def test_no_jobs_no_cleanup(self): self.assertEquals(0, self._clean_jobs(datetime.timedelta(seconds=0))) def test_unstarted_job_not_cleaned(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.assertEquals(1, self._get_num_root_jobs(COURSE_ONE)) self.assertEquals(0, self._clean_jobs(datetime.timedelta(minutes=1))) def test_active_job_not_cleaned(self): mapper = 
rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.execute_all_deferred_tasks(iteration_limit=1) self.assertEquals(1, self._get_num_root_jobs(COURSE_ONE)) self.assertEquals(0, self._clean_jobs(datetime.timedelta(minutes=1))) def test_completed_job_is_not_cleaned(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.execute_all_deferred_tasks() self._force_finalize(mapper) self.assertEquals(1, self._get_num_root_jobs(COURSE_ONE)) self.assertEquals(0, self._clean_jobs(datetime.timedelta(minutes=1))) def test_terminated_job_with_no_start_time_is_cleaned(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.execute_all_deferred_tasks(iteration_limit=1) mapper.cancel() self.execute_all_deferred_tasks() self.assertEquals(1, self._get_num_root_jobs(COURSE_ONE)) self.assertEquals(1, self._clean_jobs(datetime.timedelta(minutes=1))) self.execute_all_deferred_tasks(iteration_limit=1) self.assertEquals(0, self._get_num_root_jobs(COURSE_ONE)) def test_incomplete_job_cleaned_if_time_expired(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.execute_all_deferred_tasks(iteration_limit=1) self.assertEquals(1, self._get_num_root_jobs(COURSE_ONE)) self.assertEquals(1, self._clean_jobs(datetime.timedelta(seconds=0))) self.execute_all_deferred_tasks() # Run deferred deletion task. self.assertEquals(0, self._get_num_root_jobs(COURSE_ONE)) def test_completed_job_cleaned_if_time_expired(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.execute_all_deferred_tasks() self.assertEquals(1, self._get_num_root_jobs(COURSE_ONE)) self.assertEquals(1, self._clean_jobs(datetime.timedelta(seconds=0))) paths = self._get_cloudstore_paths(COURSE_ONE) self.assertTrue(len(paths) == 6 or len(paths) == 3) self.execute_all_deferred_tasks() # Run deferred deletion task. 
self.assertEquals(0, self._get_num_root_jobs(COURSE_ONE)) self._assert_cloudstore_paths_removed(COURSE_ONE, paths) def test_multiple_runs_cleaned(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) for _ in range(0, 3): mapper.submit() self.execute_all_deferred_tasks() self.assertEquals(3, self._get_num_root_jobs(COURSE_ONE)) self.assertEquals(3, self._clean_jobs(datetime.timedelta(seconds=0))) paths = self._get_cloudstore_paths(COURSE_ONE) self.assertTrue(len(paths) == 18 or len(paths) == 9) self.execute_all_deferred_tasks() # Run deferred deletion task. self.assertEquals(0, self._get_num_root_jobs(COURSE_ONE)) self._assert_cloudstore_paths_removed(COURSE_ONE, paths) def test_cleanup_modifies_incomplete_status(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.execute_all_deferred_tasks(iteration_limit=1) self.assertEquals(jobs.STATUS_CODE_STARTED, mapper.load().status_code) self.assertEquals(1, self._clean_jobs(datetime.timedelta(seconds=0))) self.assertEquals(jobs.STATUS_CODE_FAILED, mapper.load().status_code) self.assertIn('assumed to have failed', mapper.load().output) def test_cleanup_does_not_modify_completed_status(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.execute_all_deferred_tasks() self.assertEquals(jobs.STATUS_CODE_COMPLETED, mapper.load().status_code) self.assertEquals(1, self._clean_jobs(datetime.timedelta(seconds=0))) self.assertEquals(jobs.STATUS_CODE_COMPLETED, mapper.load().status_code) def test_cleanup_in_multiple_namespaces(self): mapper_one = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper_two = rest_providers.LabelsOnStudentsGenerator(self.course_two) for _ in range(0, 2): mapper_one.submit() mapper_two.submit() self.execute_all_deferred_tasks() self.assertEquals(2, self._get_num_root_jobs(COURSE_ONE)) course_one_paths = self._get_cloudstore_paths(COURSE_ONE) self.assertTrue(len(course_one_paths) == 12 or 
len(course_one_paths) == 6) self.assertEquals(2, self._get_num_root_jobs(COURSE_TWO)) course_two_paths = self._get_cloudstore_paths(COURSE_TWO) self.assertTrue(len(course_two_paths) == 12 or len(course_two_paths) == 6) self.assertEquals(4, self._clean_jobs(datetime.timedelta(seconds=0))) self.execute_all_deferred_tasks() # Run deferred deletion task. self.assertEquals(0, self._get_num_root_jobs(COURSE_ONE)) self.assertEquals(0, self._get_num_root_jobs(COURSE_TWO)) self._assert_cloudstore_paths_removed(COURSE_ONE, course_one_paths) self._assert_cloudstore_paths_removed(COURSE_TWO, course_two_paths) def test_cleanup_handler(self): mapper = rest_providers.LabelsOnStudentsGenerator(self.course_one) mapper.submit() self.execute_all_deferred_tasks(iteration_limit=1) mapper.cancel() self.execute_all_deferred_tasks() self.assertEquals(1, self._get_num_root_jobs(COURSE_ONE)) # Check that hitting the cron handler via GET works as well. # Note that since the actual handler uses a max time limit of # a few days, we need to set up a canceled job which, having # no defined start-time will be cleaned up immediately. 
self.get('/cron/mapreduce/cleanup', headers={'X-AppEngine-Cron': 'True'}) self.execute_all_deferred_tasks(iteration_limit=1) self.assertEquals(0, self._get_num_root_jobs(COURSE_ONE)) class DummyEntity(entities.BaseEntity): NUM_ENTITIES = 1000 data = db.TextProperty(indexed=False) class DummyDTO(object): def __init__(self, the_id, the_dict): self.id = the_id self.dict = the_dict class DummyDAO(models.BaseJsonDao): DTO = DummyDTO ENTITY = DummyEntity ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeId CURRENT_VERSION = '1.0' @classmethod def upsert(cls, the_id, the_dict): dto = cls.load(the_id) if not dto: dto = DummyDTO(the_id, the_dict) cls.save(dto) class DummyMapReduceJob(jobs.MapReduceJob): NUM_SHARDS = 10 BOGUS_VALUE_ADDED_IN_COMBINE_STEP = 3 TOTAL_AGGREGATION_KEY = 'total' def entity_class(self): return DummyEntity @staticmethod def map(item): # Count up by 1 for this shard. yield item.key().id() % DummyMapReduceJob.NUM_SHARDS, 1 # Count up by 1 for the total number of items processed by M/R job. yield DummyMapReduceJob.TOTAL_AGGREGATION_KEY, 1 @staticmethod def combine(key, values, prev_combine_results=None): if key != DummyMapReduceJob.TOTAL_AGGREGATION_KEY: # Here, we are pretending that the individual key/values # other than 'total' are not combine-able. We thus pass # through the individual values for each item unchanged. # Note that this verifies that it is supported that # combine() may yield multiple values for a single key. for value in values: yield value if prev_combine_results: for value in prev_combine_results: yield value else: # Aggregate values for 'total' here in combine step. ret = 0 for value in values: ret += int(value) if prev_combine_results: for value in prev_combine_results: ret += int(value) # Add a weird value to prove that combine() has been called. 
ret += DummyMapReduceJob.BOGUS_VALUE_ADDED_IN_COMBINE_STEP yield ret @staticmethod def reduce(key, values): ret = 0 for value in values: ret += int(value) yield key, ret class MapReduceSimpleTest(actions.TestBase): # Reserve a bunch of IDs; it appears that when module registration creates # objects, some ID counts are reserved, globally, such that we cannot # re-use those IDs, even when explicitly set on a different entity type. ID_FUDGE = 50 def setUp(self): super(MapReduceSimpleTest, self).setUp() admin_email = 'admin@foo.com' self.context = actions.simple_add_course('mr_test', admin_email, 'Test') actions.login(admin_email, is_admin=True) with common_utils.Namespace('ns_mr_test'): # Start range after zero, because of reserved/consumed IDs. for key in range(self.ID_FUDGE, DummyEntity.NUM_ENTITIES + self.ID_FUDGE): DummyDAO.upsert(key, {}) def test_basic_operation(self): job = DummyMapReduceJob(self.context) job.submit() self.execute_all_deferred_tasks() results = jobs.MapReduceJob.get_results(job.load()) # Expect to see a quantity of results equal to the number of shards, # plus one for the 'total' result. self.assertEquals(DummyMapReduceJob.NUM_SHARDS + 1, len(results)) for key, value in results: if key == DummyMapReduceJob.TOTAL_AGGREGATION_KEY: # Here, we are making the entirely unwarranted assumption that # combine() will be called exactly once. However, given that # the entire m/r is being done on a chunk of values that's # within the library's single-chunk size, and given that it's # running all on one host, etc., it turns out to be reliably # true that combine() is called exactly once. 
self.assertEquals(
                    DummyEntity.NUM_ENTITIES +
                    DummyMapReduceJob.BOGUS_VALUE_ADDED_IN_COMBINE_STEP,
                    value)
            else:
                # Here, check that each shard has been correctly aggregated by
                # the reduce step (and implicitly that the values for the
                # individual shards made it through the combine() step
                # unchanged)
                self.assertEquals(
                    DummyEntity.NUM_ENTITIES / DummyMapReduceJob.NUM_SHARDS,
                    value)
import os import sys import vtkAll as vtk import math import time import types import functools import numpy as np from director import transformUtils from director import lcmUtils from director.timercallback import TimerCallback from director.asynctaskqueue import AsyncTaskQueue from director import objectmodel as om from director import visualization as vis from director import applogic as app from director.debugVis import DebugData from director import ikplanner from director import ioUtils from director.simpletimer import SimpleTimer from director.utime import getUtime from director import affordanceitems from director import robotstate from director import robotplanlistener from director import planplayback from director import affordanceupdater from director import segmentationpanel from director import segmentation from director import terraintask from director import footstepsdriverpanel from director.footstepsdriver import FootstepRequestGenerator from director import vtkNumpy as vnp from director.tasks.taskuserpanel import TaskUserPanel from director.tasks.taskuserpanel import ImageBasedAffordanceFit import director.tasks.robottasks as rt import director.tasks.taskmanagerwidget as tmw import drc as lcmdrc import copy from PythonQt import QtCore, QtGui class PolarisPlatformPlanner(object): def __init__(self, ikServer, robotSystem): self.ikServer = ikServer self.robotSystem = robotSystem self.terrainTask = terraintask.TerrainTask(robotSystem) self.initializedFlag = False self.plans = [] def initialize(self): self.initializedFlag = True self.updateFramesAndAffordance() self.setFoostepData() self.setFootstepDataForwards() self.footstepRequestGenerator = FootstepRequestGenerator(self.robotSystem.footstepsDriver) #define the frames we need relative to the box frame etc self.numFootsteps = 2 self.minHoldTime = 2 def updateAffordance(self): self.platform = om.findObjectByName('running board') self.dimensions = np.array(self.platform.getProperty('Dimensions')) def 
updateFramesAndAffordance(self): self.updateAffordance() self.getPlanningFrame() self.requestRaycastTerrain() # this method should set the planning frame def getPlanningFrame(self): platformToWorld = self.platform.getChildFrame().transform worldToPlatform = platformToWorld.GetLinearInverse() f = self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotSystem.robotStateModel) footPosition = f.GetPosition() footPosInPlatformFrame = worldToPlatform.TransformPoint(footPosition) planFramePosInPlatformFrame = self.dimensions/2.0 planFramePosInPlatformFrame[1] = footPosInPlatformFrame[1] planFramePosInWorld = platformToWorld.TransformPoint(planFramePosInPlatformFrame) # now we want to find the homogeneous transform for the planning Frame _,quat = transformUtils.poseFromTransform(platformToWorld) self.planToWorld = transformUtils.transformFromPose(planFramePosInWorld,quat) # returns the f to plan transform, given fToWorld transform def getTransformToPlanningFrame(self,fToWorld): fToPlan = fToWorld fToPlan.PostMultiply() fToPlan.Concatenate(self.planToWorld.GetLinearInverse()) return fToPlan def getFootstepRelativeTransform(self): self.footstepsToPlan = [] for n in xrange(1,self.numFootsteps + 1): stepFrameName = 'step ' + str(n) + ' frame' fToWorld = transformUtils.copyFrame(om.findObjectByName(stepFrameName).transform) fToPlan = self.getTransformToPlanningFrame(fToWorld) self.footstepsToPlan.append(fToPlan) def visFootstepFrames(self): for n in xrange(1,self.numFootsteps + 1): fToPlan = self.footstepsToPlan[n-1] fToWorld = fToPlan fToWorld.PostMultiply() fToWorld.Concatenate(self.planToWorld) frameName = 'step_'+str(n)+'ToWorld' vis.updateFrame(fToWorld,frameName) def setFoostepData(self): self.footstepPosition = [] self.footstepPosition.append(np.array([-0.19052019522393965, -0.16752527574088918, 0.07678844136281959 ])) self.footstepPosition.append(np.array([-0.2111150611750166, 0.1621390575248655, 0.07571540514427666])) self.footstepPosition.append(np.array([ 
0.19315482042625953, -0.2866541182385602, 0.016873465171285976])) self.footstepPosition.append(np.array([ 0.13708399594888881, 0.1522408848113495, 0.008706862136780541 ])) # note that this is in radians, transform to degrees self.footstepYaw = np.array([-0.77413819, -0.57931552, -0.75042088, -0.68140433]) self.footstepYaw = np.rad2deg(self.footstepYaw) def setFootstepDataForwards(self): self.footstepPositionForwards = [] self.footstepPositionForwards.append(np.array([-0.08774644, 0.0635555 , 0.07771066])) # narrow first step # self.footstepPositionForwards.append(np.array([-0.06954156, 0.14726368, 0.07522517])) # normal first step self.footstepPositionForwards.append(np.array([ 0.18256867, -0.11692981, 0.01602283])) self.footstepPositionForwards.append(np.array([ 0.31539397, 0.15317327, 0.04011487])) self.footstepYawForwards = np.array([0, 0, 0]) self.footstepYawForwards = np.rad2deg(self.footstepYawForwards) def planTurn(self): # request footsteps 1 and 2 footstepsToWorldList = self.getFootstepToWorldTransforms([0,1]) q = self.robotSystem.robotStateJointController.q request = self.footstepRequestGenerator.makeFootstepRequest(q, footstepsToWorldList, 'right', snapToTerrain=True) request.params.map_mode = lcmdrc.footstep_plan_params_t.TERRAIN_HEIGHTS_AND_NORMALS request = self.setMapModeToTerrainAndNormals(request) self.robotSystem.footstepsDriver.sendFootstepPlanRequest(request) def planStepDown(self): footstepsToWorldList = self.getFootstepToWorldTransforms([3]) q = self.robotSystem.robotStateJointController.q request = self.footstepRequestGenerator.makeFootstepRequest(q, footstepsToWorldList, 'left', snapToTerrain=True) self.robotSystem.footstepsDriver.sendFootstepPlanRequest(request) def planStepOff(self): footstepsToWorldList = self.getFootstepToWorldTransforms([2]) q = self.robotSystem.robotStateJointController.q request = self.footstepRequestGenerator.makeFootstepRequest(q, footstepsToWorldList, 'right', snapToTerrain=True) 
self.robotSystem.footstepsDriver.sendFootstepPlanRequest(request)

    def planWeightShift(self):
        # Plan a weight shift onto the left foot: disable right-foot support,
        # shrink the support polygon, add a knee posture constraint, then run
        # IK and plan a posture trajectory from the current pose.
        ikPlanner = self.robotSystem.ikPlanner
        startPoseName = 'plan_start'
        endPoseName = 'plan_end'
        startPose = self.robotSystem.robotStateJointController.q
        ikPlanner.addPose(startPose, startPoseName)
        constraints = ikPlanner.createMovingBodyConstraints(startPoseName, lockBack=True, lockLeftArm=True, lockRightArm=True)
        # constraints[0] is assumed to be the quasi-static/support constraint
        # returned first by createMovingBodyConstraints -- TODO confirm.
        constraints[0].rightFootEnabled = False
        constraints[0].shrinkFactor=0.1
        constraints.append(ikPlanner.createKneePostureConstraint([1, 2.5]))
        cs = ikplanner.ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        endPose, info = cs.runIk()
        ikPlanner.computeMultiPostureGoal([startPose, endPose])

    # maybe should update the frame and affordance everytime we call a method?
    def planStepDownForwards(self):
        # Request footsteps 0 and 1 of the 'forwards' table, leading with the
        # left foot.
        # need to make us step left foot forwards
        # set the walking defaults to be what we want
        q = self.getPlanningStartPose()
        footstepsToWorldList = self.getFootstepToWorldTransforms([0,1], stepOffDirection='forwards')
        request = self.footstepRequestGenerator.makeFootstepRequest(q, footstepsToWorldList, 'left', snapToTerrain=True)
        # heel + midfoot contact only for the first (leading) goal step
        request.goal_steps[0].params.support_contact_groups = lcmdrc.footstep_params_t.SUPPORT_GROUPS_HEEL_MIDFOOT
        self.robotSystem.footstepsDriver.sendFootstepPlanRequest(request)

    def planStepOffForwards(self):
        # Request the single step (index 2) that steps off the platform,
        # moving forwards, leading with the left foot.
        q = self.getPlanningStartPose()
        footstepsToWorldList = self.getFootstepToWorldTransforms([2], stepOffDirection='forwards')
        request = self.footstepRequestGenerator.makeFootstepRequest(q, footstepsToWorldList, 'left', snapToTerrain=True)
        self.robotSystem.footstepsDriver.sendFootstepPlanRequest(request)

    def planWeightShiftForwards(self):
        # Intentionally a no-op: no weight-shift plan is needed for the
        # forwards step-off direction.
        pass

    def setMinHoldTime(self, request, minHoldTime):
        # Apply the minimum drake hold time to every goal step in the
        # footstep request; returns the (mutated) request for chaining.
        for stepMessages in request.goal_steps:
            stepMessages.params.drake_min_hold_time = minHoldTime
        return request

    def switchToPolarisPlatformParameters(self):
        # Select the footstep-driver parameter preset tuned for the platform.
        self.robotSystem.footstepsDriver.params.setProperty('Defaults', 'Polaris Platform')

    #get the footsteps to world transform from footstepsToPlan transform
world transform from footstepsToPlan transform def getFootstepToWorldTransforms(self,footstepIdx, stepOffDirection='sideways'): self.updateFramesAndAffordance() footstepsToWorldList = [] for j in footstepIdx: if stepOffDirection == 'sideways': rpy = np.array([0,0,self.footstepYaw[j]]) position = self.footstepPosition[j] else: rpy = np.array([0,0,0], self.footstepYawForwards[j]) position = self.footstepPositionForwards[j] footstepToPlan = transformUtils.frameFromPositionAndRPY(position,rpy) footstepToWorld = footstepToPlan footstepToWorld.PostMultiply(); footstepToWorld.Concatenate(self.planToWorld) footstepsToWorldList.append(footstepToWorld) return footstepsToWorldList def setMapModeToTerrainAndNormals(self,request): request.params.map_mode = lcmdrc.footstep_plan_params_t.TERRAIN_HEIGHTS_AND_NORMALS return request def spawnRunningBoardAffordance(self): boxDimensions = [0.4, 1.0, 0.05] stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotSystem.robotStateModel, useWorldZ=False) boxFrame = transformUtils.copyFrame(stanceFrame) boxFrame.PreMultiply() boxFrame.Translate(0.0, 0.0, -boxDimensions[2]/2.0) box = om.findObjectByName('running board') if not box: pose = transformUtils.poseFromTransform(boxFrame) desc = dict(classname='BoxAffordanceItem', Name='running board', Dimensions=boxDimensions, pose=pose) box = self.robotSystem.affordanceManager.newAffordanceFromDescription(desc) return box def fitRunningBoardAtFeet(self): # get stance frame startPose = self.getPlanningStartPose() stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotSystem.robotStateModel, useWorldZ=False) stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame) # get pointcloud and extract search region covering the running board polyData = segmentation.getCurrentRevolutionData() polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01) _, polyData = segmentation.removeGround(polyData) polyData = segmentation.cropToBox(polyData, stanceFrame, 
[1.0, 1.0, 0.1]) if not polyData.GetNumberOfPoints(): print 'empty search region point cloud' return vis.updatePolyData(polyData, 'running board search points', parent=segmentation.getDebugFolder(), color=[0,1,0], visible=False) # extract maximal points along the stance x axis perpAxis = stanceFrameAxes[0] edgeAxis = stanceFrameAxes[1] edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis) edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints) vis.updatePolyData(edgePoints, 'edge points', parent=segmentation.getDebugFolder(), visible=True) # ransac fit a line to the edge points linePoint, lineDirection, fitPoints = segmentation.applyLineFit(edgePoints) if np.dot(lineDirection, stanceFrameAxes[1]) < 0: lineDirection = -lineDirection linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels', [1.0, 1.0]) dists = np.dot(vnp.getNumpyFromVtk(linePoints, 'Points')-linePoint, lineDirection) p1 = linePoint + lineDirection*np.min(dists) p2 = linePoint + lineDirection*np.max(dists) vis.updatePolyData(fitPoints, 'line fit points', parent=segmentation.getDebugFolder(), colorByName='ransac_labels', visible=False) # compute a new frame that is in plane with the stance frame # and matches the orientation and position of the detected edge origin = np.array(stanceFrame.GetPosition()) normal = np.array(stanceFrameAxes[2]) # project stance origin to edge, then back to foot frame originProjectedToEdge = linePoint + lineDirection*np.dot(origin - linePoint, lineDirection) originProjectedToPlane = segmentation.projectPointToPlane(originProjectedToEdge, origin, normal) zaxis = np.array(stanceFrameAxes[2]) yaxis = np.array(lineDirection) xaxis = np.cross(yaxis, zaxis) xaxis /= np.linalg.norm(xaxis) yaxis = np.cross(zaxis, xaxis) yaxis /= np.linalg.norm(yaxis) d = DebugData() d.addSphere(p1, radius=0.005) d.addSphere(p2, radius=0.005) d.addLine(p1, p2) d.addSphere(originProjectedToEdge, radius=0.001, color=[1,0,0]) d.addSphere(originProjectedToPlane, radius=0.001, 
color=[0,1,0]) d.addLine(originProjectedToPlane, origin, color=[0,1,0]) d.addLine(originProjectedToEdge, origin, color=[1,0,0]) vis.updatePolyData(d.getPolyData(), 'running board edge', parent=segmentation.getDebugFolder(), colorByName='RGB255', visible=False) # update the running board box affordance position and orientation to # fit the detected edge box = self.spawnRunningBoardAffordance() boxDimensions = box.getProperty('Dimensions') t = transformUtils.getTransformFromAxesAndOrigin(xaxis, yaxis, zaxis, originProjectedToPlane) t.PreMultiply() t.Translate(-boxDimensions[0]/2.0, 0.0, -boxDimensions[2]/2.0) box.getChildFrame().copyFrame(t) self.initialize() #passthrough methods to the terrain task # should force updating the affordance before doing this def requestRaycastTerrain(self): self.terrainTask.requestRaycastTerrain() def spawnGroundAffordance(self): self.terrainTask.spawnGroundAffordance() def spawnFootplaneGroundAffordance(self): self.terrainTask.spawnFootplaneGroundAffordance('right') def planArmsUp(self, stepOffDirection): ikPlanner = self.robotSystem.ikPlanner startPose = self.getPlanningStartPose() if stepOffDirection == 'forwards': endPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'hands-forward', side='left') endPose = ikPlanner.getMergedPostureFromDatabase(endPose, 'General', 'hands-forward', side='right') else: endPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'polaris_step_arm_safe', side='left') endPose = ikPlanner.getMergedPostureFromDatabase(endPose, 'General', 'polaris_step_arm_safe', side='right') plan = ikPlanner.computeMultiPostureGoal([startPose, endPose]) self.addPlan(plan) def addPlan(self, plan): self.plans.append(plan) def commitManipPlan(self): self.robotSystem.manipPlanner.commitManipPlan(self.plans[-1]) def getPlanningStartPose(self): return self.robotSystem.robotStateJointController.q.copy() def planNominal(self): ikPlanner = self.robotSystem.ikPlanner startPose = 
self.getPlanningStartPose() endPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'safe nominal') endPose, info = ikPlanner.computeStandPose(endPose) newPlan = ikPlanner.computePostureGoal(startPose, endPose) self.addPlan(newPlan) def addPlan(self, plan): self.plans.append(plan) class PolarisPlatformPlannerPanel(TaskUserPanel): def __init__(self, robotSystem): TaskUserPanel.__init__(self, windowTitle='Platform Task') self.robotSystem = robotSystem self.platformPlanner = PolarisPlatformPlanner(robotSystem.ikServer, robotSystem) self.addButtons() self.addDefaultProperties() self.addTasks() def addButtons(self): self.addManualButton('Fit Platform Affordance', self.onFitPlatformAffordance) self.addManualButton('Spawn Ground Affordance', self.onSpawnGroundAffordance) self.addManualButton('Raycast Terrain', self.onRaycastTerrain) self.addManualButton('Start', self.onStart) self.addManualButton('Update Affordance', self.onUpdateAffordance) self.addManualButton('Arms Up',self.onArmsUp) self.addManualButton('Plan Turn', self.onPlanTurn) self.addManualButton('Plan Step Down', self.onPlanStepDown) self.addManualButton('Plan Weight Shift', self.onPlanWeightShift) self.addManualButton('Plan Step Off', self.onPlanStepOff) def addDefaultProperties(self): self.params.addProperty('Step Off Direction', 0, attributes=om.PropertyAttributes(enumNames=['Forwards','Sideways'])) self._syncProperties() def _syncProperties(self): self.stepOffDirection = self.params.getPropertyEnumValue('Step Off Direction').lower() def onFitPlatformAffordance(self): self.platformPlanner.fitRunningBoardAtFeet() def onSpawnGroundAffordance(self): self.platformPlanner.spawnGroundAffordance() def onArmsUp(self): self.platformPlanner.planArmsUp(self.stepOffDirection) def onRaycastTerrain(self): self.platformPlanner.requestRaycastTerrain() def onStart(self): self.platformPlanner.initialize() def onUpdateAffordance(self): self.platformPlanner.updateFramesAndAffordance() def onPlanTurn(self): 
self._syncProperties() self.platformPlanner.planTurn() def onPlanStepDown(self): self._syncProperties() if self.stepOffDirection == 'forwards': self.platformPlanner.planStepDownForwards() else: self.platformPlanner.planStepDown() def onPlanWeightShift(self): self._syncProperties() if self.stepOffDirection == 'forwards': self.platformPlanner.planWeightShiftForwards() else: self.platformPlanner.planWeightShift() def onPlanStepOff(self): self._syncProperties() if self.stepOffDirection == 'forwards': self.platformPlanner.planStepOffForwards() else: self.platformPlanner.planStepOff() def addTasks(self): # some helpers self.folder = None def addTask(task, parent=None): parent = parent or self.folder self.taskTree.onAddTask(task, copy=False, parent=parent) def addFunc(func, name, parent=None): addTask(rt.CallbackTask(callback=func, name=name), parent=parent) def addFolder(name, parent=None): self.folder = self.taskTree.addGroup(name, parent=parent) return self.folder
""" Customer Model Tests. """ import decimal import sys from copy import deepcopy from unittest.mock import ANY, patch from django.contrib.auth import get_user_model from django.test import TestCase from django.utils import timezone from stripe.error import InvalidRequestError from djstripe import settings as djstripe_settings from djstripe.exceptions import MultipleSubscriptionException from djstripe.models import ( Card, Charge, Coupon, Customer, DjstripePaymentMethod, Invoice, Plan, Subscription ) from djstripe.settings import STRIPE_SECRET_KEY from . import ( FAKE_ACCOUNT, FAKE_CARD, FAKE_CARD_V, FAKE_CHARGE, FAKE_COUPON, FAKE_CUSTOMER, FAKE_CUSTOMER_II, FAKE_DISCOUNT_CUSTOMER, FAKE_INVOICE, FAKE_INVOICE_III, FAKE_INVOICEITEM, FAKE_PLAN, FAKE_SUBSCRIPTION, FAKE_SUBSCRIPTION_II, FAKE_UPCOMING_INVOICE, AssertStripeFksMixin, StripeList, datetime_to_unix, default_account ) # Don't try and use autospec=True for functions that have a exception side-effect on py3.4 # see https://bugs.python.org/issue23661 IS_EXCEPTION_AUTOSPEC_SUPPORTED = sys.version_info >= (3, 5) class TestCustomer(AssertStripeFksMixin, TestCase): def setUp(self): self.user = get_user_model().objects.create_user( username="pydanny", email="pydanny@gmail.com" ) self.customer = FAKE_CUSTOMER.create_for_user(self.user) self.payment_method, _ = DjstripePaymentMethod._get_or_create_source( FAKE_CARD, "card" ) self.card = self.payment_method.resolve() self.customer.default_source = self.payment_method self.customer.save() self.account = default_account() def test_str(self): self.assertEqual(str(self.customer), self.user.email) self.customer.subscriber.email = "" self.assertEqual(str(self.customer), self.customer.id) self.customer.subscriber = None self.assertEqual(str(self.customer), "{id} (deleted)".format(id=self.customer.id)) def test_account_balance(self): self.assertEqual(self.customer.account_balance, 0) self.assertEqual(self.customer.credits, 0) self.customer.account_balance = 1000 
self.assertEqual(self.customer.account_balance, 1000) self.assertEqual(self.customer.credits, 0) self.assertEqual(self.customer.pending_charges, 1000) self.customer.account_balance = -1000 self.assertEqual(self.customer.account_balance, -1000) self.assertEqual(self.customer.credits, 1000) self.assertEqual(self.customer.pending_charges, 0) def test_customer_dashboard_url(self): expected_url = "https://dashboard.stripe.com/test/customers/{}".format( self.customer.id ) self.assertEqual(self.customer.get_stripe_dashboard_url(), expected_url) self.customer.livemode = True expected_url = "https://dashboard.stripe.com/customers/{}".format(self.customer.id) self.assertEqual(self.customer.get_stripe_dashboard_url(), expected_url) unsaved_customer = Customer() self.assertEqual(unsaved_customer.get_stripe_dashboard_url(), "") def test_customer_sync_unsupported_source(self): fake_customer = deepcopy(FAKE_CUSTOMER_II) fake_customer["default_source"]["object"] = fake_customer["sources"]["data"][0][ "object" ] = "fish" user = get_user_model().objects.create_user( username="test_user_sync_unsupported_source" ) synced_customer = fake_customer.create_for_user(user) self.assertEqual(0, synced_customer.legacy_cards.count()) self.assertEqual(0, synced_customer.sources.count()) self.assertEqual( synced_customer.default_source, DjstripePaymentMethod.objects.get(id=fake_customer["default_source"]["id"]), ) def test_customer_sync_has_subscriber_metadata(self): user = get_user_model().objects.create(username="test_metadata", id=12345) fake_customer = deepcopy(FAKE_CUSTOMER) fake_customer["id"] = "cus_sync_has_subscriber_metadata" fake_customer["metadata"] = {"djstripe_subscriber": "12345"} customer = Customer.sync_from_stripe_data(fake_customer) self.assertEqual(customer.subscriber, user) self.assertEqual(customer.metadata, {"djstripe_subscriber": "12345"}) def test_customer_sync_has_subscriber_metadata_disabled(self): user = 
        # NOTE: tail of a test that began before this chunk — verifies that with
        # SUBSCRIBER_CUSTOMER_KEY disabled, synced metadata does not link a subscriber.
        get_user_model().objects.create(username="test_metadata_disabled", id=98765)
        fake_customer = deepcopy(FAKE_CUSTOMER)
        fake_customer["id"] = "cus_test_metadata_disabled"
        fake_customer["metadata"] = {"djstripe_subscriber": "98765"}
        with patch("djstripe.settings.SUBSCRIBER_CUSTOMER_KEY", return_value=""):
            customer = Customer.sync_from_stripe_data(fake_customer)
        self.assertNotEqual(customer.subscriber, user)
        self.assertNotEqual(customer.subscriber_id, 98765)
        self.assert_fks(
            customer,
            expected_blank_fks={"djstripe.Customer.coupon", "djstripe.Customer.subscriber"},
        )

    def test_customer_sync_has_bad_subscriber_metadata(self):
        # A djstripe_subscriber metadata value that matches no user must not crash the sync.
        fake_customer = deepcopy(FAKE_CUSTOMER)
        fake_customer["id"] = "cus_sync_has_bad_subscriber_metadata"
        fake_customer["metadata"] = {"djstripe_subscriber": "does_not_exist"}
        customer = Customer.sync_from_stripe_data(fake_customer)
        self.assertEqual(customer.subscriber, None)
        self.assertEqual(customer.metadata, {"djstripe_subscriber": "does_not_exist"})
        self.assert_fks(
            customer,
            expected_blank_fks={"djstripe.Customer.coupon", "djstripe.Customer.subscriber"},
        )

    @patch("stripe.Customer.create")
    def test_customer_create_metadata_disabled(self, customer_mock):
        # With the subscriber key disabled, Customer.create must send empty metadata.
        user = get_user_model().objects.create_user(
            username="test_user_create_metadata_disabled"
        )
        fake_customer = deepcopy(FAKE_CUSTOMER)
        fake_customer["id"] = "cus_test_create_metadata_disabled"
        customer_mock.return_value = fake_customer
        # Temporarily disable, then restore, the module-level setting.
        djstripe_settings.SUBSCRIBER_CUSTOMER_KEY = ""
        customer = Customer.create(user)
        djstripe_settings.SUBSCRIBER_CUSTOMER_KEY = "djstripe_subscriber"
        customer_mock.assert_called_once_with(
            api_key=STRIPE_SECRET_KEY, email="", idempotency_key=None, metadata={}
        )
        self.assertEqual(customer.metadata, None)
        self.assert_fks(
            customer,
            expected_blank_fks={"djstripe.Customer.coupon", "djstripe.Customer.default_source"},
        )

    @patch("stripe.Card.retrieve", return_value=FAKE_CUSTOMER_II["default_source"])
    def test_customer_sync_non_local_card(self, card_retrieve_mock):
        # A default_source card not present locally is fetched and stored as a legacy card.
        fake_customer = deepcopy(FAKE_CUSTOMER_II)
        fake_customer["id"] = fake_customer["sources"]["data"][0][
            "customer"
        ] = "cus_test_sync_non_local_card"
        user = get_user_model().objects.create_user(username="test_user_sync_non_local_card")
        customer = fake_customer.create_for_user(user)
        self.assertEqual(customer.sources.count(), 0)
        self.assertEqual(customer.legacy_cards.count(), 1)
        self.assertEqual(customer.default_source.id, fake_customer["default_source"]["id"])

    @patch("stripe.Customer.create")
    def test_customer_sync_no_sources(self, customer_mock):
        # Syncing a customer with no sources must leave source relations empty.
        fake_customer = deepcopy(FAKE_CUSTOMER)
        fake_customer["id"] = "cus_test_sync_no_sources"
        fake_customer["default_source"] = None
        fake_customer["sources"] = None
        customer_mock.return_value = fake_customer
        user = get_user_model().objects.create_user(username="test_user_sync_non_local_card")
        customer = Customer.create(user)
        self.assertEqual(
            customer_mock.call_args_list[0][1].get("metadata"),
            {"djstripe_subscriber": user.pk}
        )
        self.assertEqual(customer.sources.count(), 0)
        self.assertEqual(customer.legacy_cards.count(), 0)
        self.assertEqual(customer.default_source, None)
        self.assert_fks(
            customer,
            expected_blank_fks={"djstripe.Customer.coupon", "djstripe.Customer.default_source"},
        )

    def test_customer_sync_default_source_string(self):
        # default_source given as a bare id string must resolve to a Card object.
        Customer.objects.all().delete()
        Card.objects.all().delete()
        customer_fake = deepcopy(FAKE_CUSTOMER)
        customer_fake["default_source"] = customer_fake["sources"]["data"][0][
            "id"
        ] = "card_sync_source_string"
        customer = Customer.sync_from_stripe_data(customer_fake)
        self.assertEqual(customer.default_source.id, customer_fake["default_source"])
        self.assertEqual(customer.legacy_cards.count(), 2)
        self.assertEqual(len(list(customer.payment_methods)), 2)
        self.assert_fks(
            customer,
            expected_blank_fks={"djstripe.Customer.coupon", "djstripe.Customer.subscriber"},
        )

    # --- purge / delete behaviour -------------------------------------------

    @patch("stripe.Customer.retrieve")
    def test_customer_purge_leaves_customer_record(self, customer_retrieve_fake):
        # purge() detaches data but keeps the local Customer row and the user.
        self.customer.purge()
        customer = Customer.objects.get(id=self.customer.id)
        self.assertTrue(customer.subscriber is None)
        self.assertTrue(customer.default_source is None)
        self.assertTrue(not customer.legacy_cards.all())
        self.assertTrue(not customer.sources.all())
        self.assertTrue(get_user_model().objects.filter(pk=self.user.pk).exists())

    @patch("stripe.Customer.retrieve")
    def test_customer_delete_same_as_purge(self, customer_retrieve_fake):
        # delete() is expected to behave exactly like purge().
        self.customer.delete()
        customer = Customer.objects.get(id=self.customer.id)
        self.assertTrue(customer.subscriber is None)
        self.assertTrue(customer.default_source is None)
        self.assertTrue(not customer.legacy_cards.all())
        self.assertTrue(not customer.sources.all())
        self.assertTrue(get_user_model().objects.filter(pk=self.user.pk).exists())

    @patch("stripe.Customer.retrieve")
    def test_customer_purge_raises_customer_exception(self, customer_retrieve_mock):
        # A "No such customer" API error during purge is swallowed; purge still succeeds.
        customer_retrieve_mock.side_effect = InvalidRequestError("No such customer:", "blah")
        self.customer.purge()
        customer = Customer.objects.get(id=self.customer.id)
        self.assertTrue(customer.subscriber is None)
        self.assertTrue(customer.default_source is None)
        self.assertTrue(not customer.legacy_cards.all())
        self.assertTrue(not customer.sources.all())
        self.assertTrue(get_user_model().objects.filter(pk=self.user.pk).exists())
        customer_retrieve_mock.assert_called_with(
            id=self.customer.id, api_key=STRIPE_SECRET_KEY, expand=["default_source"]
        )
        self.assertEqual(3, customer_retrieve_mock.call_count)

    @patch("stripe.Customer.retrieve")
    def test_customer_delete_raises_unexpected_exception(self, customer_retrieve_mock):
        # Any other InvalidRequestError must propagate out of purge().
        customer_retrieve_mock.side_effect = InvalidRequestError(
            "Unexpected Exception", "blah"
        )
        with self.assertRaisesMessage(InvalidRequestError, "Unexpected Exception"):
            self.customer.purge()
        customer_retrieve_mock.assert_called_once_with(
            id=self.customer.id, api_key=STRIPE_SECRET_KEY, expand=["default_source"]
        )

    def test_can_charge(self):
        self.assertTrue(self.customer.can_charge())

    # --- card management ----------------------------------------------------

    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_add_card_set_default_true(self, customer_retrieve_mock):
        # Default behaviour: the most recently added card becomes default_source.
        self.customer.add_card(FAKE_CARD["id"])
        self.customer.add_card(FAKE_CARD_V["id"])
        self.assertEqual(2, Card.objects.count())
        self.assertEqual(FAKE_CARD_V["id"], self.customer.default_source.id)

    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_add_card_set_default_false(self, customer_retrieve_mock):
        # With set_default=False the first card remains the default.
        self.customer.add_card(FAKE_CARD["id"], set_default=False)
        self.customer.add_card(FAKE_CARD_V["id"], set_default=False)
        self.assertEqual(2, Card.objects.count())
        self.assertEqual(FAKE_CARD["id"], self.customer.default_source.id)

    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_add_card_set_default_false_with_single_card_still_becomes_default(
        self, customer_retrieve_mock
    ):
        self.customer.add_card(FAKE_CARD["id"], set_default=False)
        # NOTE(review): only one add_card call above but a count of 2 is asserted;
        # presumably a fixture card exists from setUp — confirm against the test
        # class setUp, which is outside this chunk.
        self.assertEqual(2, Card.objects.count())
        self.assertEqual(FAKE_CARD["id"], self.customer.default_source.id)

    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_cannot_charge(self, customer_retrieve_fake):
        self.customer.delete()
        self.assertFalse(self.customer.can_charge())

    def test_charge_accepts_only_decimals(self):
        # Plain ints are rejected; amounts must be decimal.Decimal dollars.
        with self.assertRaises(ValueError):
            self.customer.charge(10)

    # --- coupons --------------------------------------------------------------

    @patch("stripe.Coupon.retrieve", return_value=deepcopy(FAKE_COUPON))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_add_coupon_by_id(self, customer_retrieve_mock, coupon_retrieve_mock):
        self.assertEqual(self.customer.coupon, None)
        self.customer.add_coupon(FAKE_COUPON["id"])
        customer_retrieve_mock.assert_called_once_with(
            api_key=STRIPE_SECRET_KEY, expand=["default_source"], id=FAKE_CUSTOMER["id"]
        )

    @patch("stripe.Coupon.retrieve", return_value=deepcopy(FAKE_COUPON))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_add_coupon_by_object(self, customer_retrieve_mock, coupon_retrieve_mock):
        self.assertEqual(self.customer.coupon, None)
        coupon = Coupon.sync_from_stripe_data(FAKE_COUPON)
        fake_discount = deepcopy(FAKE_DISCOUNT_CUSTOMER)

        def fake_customer_save(self, *args, **kwargs):
            # fake the api coupon update behaviour
            coupon = self.pop("coupon", None)
            if coupon:
                self["discount"] = fake_discount
            else:
                self["discount"] = None
            return self

        with patch("tests.CustomerDict.save", new=fake_customer_save):
            self.customer.add_coupon(coupon)
        customer_retrieve_mock.assert_called_once_with(
            api_key=STRIPE_SECRET_KEY, expand=["default_source"], id=FAKE_CUSTOMER["id"]
        )
        self.customer.refresh_from_db()
        self.assert_fks(self.customer, expected_blank_fks={})

    # --- charges and refunds ---------------------------------------------------

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Charge.retrieve")
    def test_refund_charge(self, charge_retrieve_mock, default_account_mock):
        default_account_mock.return_value = self.account
        fake_charge_no_invoice = deepcopy(FAKE_CHARGE)
        fake_charge_no_invoice.update({"invoice": None})
        charge_retrieve_mock.return_value = fake_charge_no_invoice
        charge, created = Charge._get_or_create_from_stripe_object(fake_charge_no_invoice)
        self.assertTrue(created)
        charge.refund()
        # Re-fetching the same Stripe object must update, not duplicate, the row.
        refunded_charge, created2 = Charge._get_or_create_from_stripe_object(
            fake_charge_no_invoice
        )
        self.assertFalse(created2)
        self.assertEqual(refunded_charge.refunded, True)
        self.assertEqual(refunded_charge.amount_refunded, decimal.Decimal("22.00"))

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Charge.retrieve")
    def test_refund_charge_object_returned(
        self, charge_retrieve_mock, default_account_mock
    ):
        # refund() should also return the refunded Charge object directly.
        default_account_mock.return_value = self.account
        fake_charge_no_invoice = deepcopy(FAKE_CHARGE)
        fake_charge_no_invoice.update({"invoice": None})
        charge_retrieve_mock.return_value = fake_charge_no_invoice
        charge, created = Charge._get_or_create_from_stripe_object(fake_charge_no_invoice)
        self.assertTrue(created)
        refunded_charge = charge.refund()
        self.assertEqual(refunded_charge.refunded, True)
        self.assertEqual(refunded_charge.amount_refunded, decimal.Decimal("22.00"))

    def test_calculate_refund_amount_full_refund(self):
        # Amounts are converted to integer cents.
        charge = Charge(
            id="ch_111111", customer=self.customer, amount=decimal.Decimal("500.00")
        )
        self.assertEqual(charge._calculate_refund_amount(), 50000)

    def test_calculate_refund_amount_partial_refund(self):
        charge = Charge(
            id="ch_111111", customer=self.customer, amount=decimal.Decimal("500.00")
        )
        self.assertEqual(
            charge._calculate_refund_amount(amount=decimal.Decimal("300.00")), 30000
        )

    def test_calculate_refund_above_max_refund(self):
        # A refund above the charge amount is clamped to the charge amount.
        charge = Charge(
            id="ch_111111", customer=self.customer, amount=decimal.Decimal("500.00")
        )
        self.assertEqual(
            charge._calculate_refund_amount(amount=decimal.Decimal("600.00")), 50000
        )

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Charge.retrieve")
    @patch("stripe.Charge.create")
    def test_charge_converts_dollars_into_cents(
        self, charge_create_mock, charge_retrieve_mock, default_account_mock
    ):
        default_account_mock.return_value = self.account
        fake_charge_copy = deepcopy(FAKE_CHARGE)
        fake_charge_copy.update({"invoice": None, "amount": 1000})
        charge_create_mock.return_value = fake_charge_copy
        charge_retrieve_mock.return_value = fake_charge_copy
        self.customer.charge(amount=decimal.Decimal("10.00"))
        _, kwargs = charge_create_mock.call_args
        self.assertEqual(kwargs["amount"], 1000)

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Charge.retrieve")
    @patch("stripe.Charge.create")
    @patch("stripe.Invoice.retrieve")
    @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
    def test_charge_doesnt_require_invoice(
        self,
        subscription_retrieve_mock,
        invoice_retrieve_mock,
        charge_create_mock,
        charge_retrieve_mock,
        default_account_mock,
    ):
        # Charging against an invoice id that isn't synced locally must not raise.
        default_account_mock.return_value = self.account
        fake_charge_copy = deepcopy(FAKE_CHARGE)
        fake_charge_copy.update(
            {"invoice": FAKE_INVOICE["id"], "amount": FAKE_INVOICE["amount_due"]}
        )
        fake_invoice_copy = deepcopy(FAKE_INVOICE)
        charge_create_mock.return_value = fake_charge_copy
        charge_retrieve_mock.return_value = fake_charge_copy
        invoice_retrieve_mock.return_value = fake_invoice_copy
        try:
            self.customer.charge(amount=decimal.Decimal("20.00"))
        except Invoice.DoesNotExist:
            self.fail(msg="Stripe Charge shouldn't throw Invoice DoesNotExist.")

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Charge.retrieve")
    @patch("stripe.Charge.create")
    def test_charge_passes_extra_arguments(
        self, charge_create_mock, charge_retrieve_mock, default_account_mock
    ):
        # Extra kwargs (capture, destination, ...) are forwarded to stripe.Charge.create.
        default_account_mock.return_value = self.account
        fake_charge_copy = deepcopy(FAKE_CHARGE)
        fake_charge_copy.update({"invoice": None})
        charge_create_mock.return_value = fake_charge_copy
        charge_retrieve_mock.return_value = fake_charge_copy
        self.customer.charge(
            amount=decimal.Decimal("10.00"), capture=True, destination=FAKE_ACCOUNT["id"]
        )
        _, kwargs = charge_create_mock.call_args
        self.assertEqual(kwargs["capture"], True)
        self.assertEqual(kwargs["destination"], FAKE_ACCOUNT["id"])

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Charge.retrieve")
    @patch("stripe.Charge.create")
    def test_charge_string_source(
        self, charge_create_mock, charge_retrieve_mock, default_account_mock
    ):
        # A source may be given as a bare id string.
        default_account_mock.return_value = self.account
        fake_charge_copy = deepcopy(FAKE_CHARGE)
        fake_charge_copy.update({"invoice": None})
        charge_create_mock.return_value = fake_charge_copy
        charge_retrieve_mock.return_value = fake_charge_copy
        self.customer.charge(amount=decimal.Decimal("10.00"), source=self.card.id)

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Charge.retrieve")
    @patch("stripe.Charge.create")
    def test_charge_card_source(
        self, charge_create_mock, charge_retrieve_mock, default_account_mock
    ):
        # A source may also be given as a Card model instance.
        default_account_mock.return_value = self.account
        fake_charge_copy = deepcopy(FAKE_CHARGE)
        fake_charge_copy.update({"invoice": None})
        charge_create_mock.return_value = fake_charge_copy
        charge_retrieve_mock.return_value = fake_charge_copy
        self.customer.charge(amount=decimal.Decimal("10.00"), source=self.card)

    # --- unpaid-invoice retry ----------------------------------------------------

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
    @patch(
        "stripe.Invoice.list",
        return_value=StripeList(data=[deepcopy(FAKE_INVOICE), deepcopy(FAKE_INVOICE_III)]),
    )
    @patch("djstripe.models.Invoice.retry", autospec=True)
    def test_retry_unpaid_invoices(
        self,
        invoice_retry_mock,
        invoice_list_mock,
        charge_retrieve_mock,
        customer_retrieve_mock,
        subscription_retrive_mock,
        default_account_mock,
    ):
        # Only the unpaid invoice (FAKE_INVOICE_III) should be retried.
        default_account_mock.return_value = self.account
        self.customer.retry_unpaid_invoices()
        invoice = Invoice.objects.get(id=FAKE_INVOICE_III["id"])
        invoice_retry_mock.assert_called_once_with(invoice)

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
    @patch("stripe.Invoice.list", return_value=StripeList(data=[deepcopy(FAKE_INVOICE)]))
    @patch("djstripe.models.Invoice.retry", autospec=True)
    def test_retry_unpaid_invoices_none_unpaid(
        self,
        invoice_retry_mock,
        invoice_list_mock,
        charge_retrieve_mock,
        customer_retrieve_mock,
        subscription_retrieve_mock,
        default_account_mock,
    ):
        default_account_mock.return_value = self.account
        self.customer.retry_unpaid_invoices()
        self.assertFalse(invoice_retry_mock.called)

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
    @patch(
        "stripe.Invoice.list", return_value=StripeList(data=[deepcopy(FAKE_INVOICE_III)])
    )
    @patch("djstripe.models.Invoice.retry", autospec=IS_EXCEPTION_AUTOSPEC_SUPPORTED)
    def test_retry_unpaid_invoices_expected_exception(
        self,
        invoice_retry_mock,
        invoice_list_mock,
        charge_retrieve_mock,
        customer_retrieve_mock,
        subscription_retrive_mock,
        default_account_mock,
    ):
        # "already paid" errors from the API are expected and must be swallowed.
        default_account_mock.return_value = self.account
        invoice_retry_mock.side_effect = InvalidRequestError(
            "Invoice is already paid", "blah"
        )
        try:
            self.customer.retry_unpaid_invoices()
        except Exception:
            self.fail("Exception was unexpectedly raised.")

    @patch("djstripe.models.Account.get_default_account")
    @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
    @patch(
        "stripe.Invoice.list", return_value=StripeList(data=[deepcopy(FAKE_INVOICE_III)])
    )
    @patch("djstripe.models.Invoice.retry", autospec=IS_EXCEPTION_AUTOSPEC_SUPPORTED)
    def test_retry_unpaid_invoices_unexpected_exception(
        self,
        invoice_retry_mock,
        invoice_list_mock,
        charge_retrieve_mock,
        customer_retrieve_mock,
        subscription_retrive_mock,
        default_account_mock,
    ):
        # Any other API error must propagate.
        default_account_mock.return_value = self.account
        invoice_retry_mock.side_effect = InvalidRequestError("This should fail!", "blah")
        with self.assertRaisesMessage(InvalidRequestError, "This should fail!"):
            self.customer.retry_unpaid_invoices()

    @patch("stripe.Invoice.create")
    def test_send_invoice_success(self, invoice_create_mock):
        return_status = self.customer.send_invoice()
        self.assertTrue(return_status)
        invoice_create_mock.assert_called_once_with(
            api_key=STRIPE_SECRET_KEY, customer=self.customer.id
        )

    @patch("stripe.Invoice.create")
    def test_send_invoice_failure(self, invoice_create_mock):
        # A failed create returns False rather than raising.
        invoice_create_mock.side_effect = InvalidRequestError(
            "Invoice creation failed.", "blah"
        )
        return_status = self.customer.send_invoice()
        self.assertFalse(return_status)
        invoice_create_mock.assert_called_once_with(
            api_key=STRIPE_SECRET_KEY, customer=self.customer.id
        )

    # --- discount syncing ---------------------------------------------------------

    @patch("stripe.Coupon.retrieve", return_value=deepcopy(FAKE_COUPON))
    def test_sync_customer_with_discount(self, coupon_retrieve_mock):
        self.assertIsNone(self.customer.coupon)
        fake_customer = deepcopy(FAKE_CUSTOMER)
        fake_customer["discount"] = deepcopy(FAKE_DISCOUNT_CUSTOMER)
        customer = Customer.sync_from_stripe_data(fake_customer)
        self.assertEqual(customer.coupon.id, FAKE_COUPON["id"])
        self.assertIsNotNone(customer.coupon_start)
        self.assertIsNone(customer.coupon_end)

    @patch("stripe.Coupon.retrieve", return_value=deepcopy(FAKE_COUPON))
    def test_sync_customer_discount_already_present(self, coupon_retrieve_mock):
        fake_customer = deepcopy(FAKE_CUSTOMER)
        fake_customer["discount"] = deepcopy(FAKE_DISCOUNT_CUSTOMER)

        # Set the customer's coupon to be what we'll sync
        customer = Customer.objects.get(id=FAKE_CUSTOMER["id"])
        customer.coupon = Coupon.sync_from_stripe_data(FAKE_COUPON)
        customer.save()

        customer = Customer.sync_from_stripe_data(fake_customer)
        self.assertEqual(customer.coupon.id, FAKE_COUPON["id"])

    def test_sync_customer_delete_discount(self):
        # Syncing a customer without a discount clears a previously-set coupon.
        test_coupon = Coupon.sync_from_stripe_data(FAKE_COUPON)
        self.customer.coupon = test_coupon
        self.customer.save()
        self.assertEqual(self.customer.coupon.id, FAKE_COUPON["id"])
        customer = Customer.sync_from_stripe_data(FAKE_CUSTOMER)
        self.assertEqual(customer.coupon, None)

    # --- bulk sync helpers -----------------------------------------------------------

    @patch("djstripe.models.Invoice.sync_from_stripe_data")
    @patch(
        "stripe.Invoice.list",
        return_value=StripeList(data=[deepcopy(FAKE_INVOICE), deepcopy(FAKE_INVOICE_III)]),
    )
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_sync_invoices(
        self, customer_retrieve_mock, invoice_list_mock, invoice_sync_mock
    ):
        self.customer._sync_invoices()
        self.assertEqual(2, invoice_sync_mock.call_count)

    @patch("djstripe.models.Invoice.sync_from_stripe_data")
    @patch("stripe.Invoice.list", return_value=StripeList(data=[]))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_sync_invoices_none(
        self, customer_retrieve_mock, invoice_list_mock, invoice_sync_mock
    ):
        self.customer._sync_invoices()
        self.assertEqual(0, invoice_sync_mock.call_count)

    @patch("djstripe.models.Charge.sync_from_stripe_data")
    @patch("stripe.Charge.list", return_value=StripeList(data=[deepcopy(FAKE_CHARGE)]))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_sync_charges(
        self, customer_retrieve_mock, charge_list_mock, charge_sync_mock
    ):
        self.customer._sync_charges()
        self.assertEqual(1, charge_sync_mock.call_count)

    @patch("djstripe.models.Charge.sync_from_stripe_data")
    @patch("stripe.Charge.list", return_value=StripeList(data=[]))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_sync_charges_none(
        self, customer_retrieve_mock, charge_list_mock, charge_sync_mock
    ):
        self.customer._sync_charges()
        self.assertEqual(0, charge_sync_mock.call_count)

    @patch("djstripe.models.Subscription.sync_from_stripe_data")
    @patch(
        "stripe.Subscription.list",
        return_value=StripeList(
            data=[deepcopy(FAKE_SUBSCRIPTION), deepcopy(FAKE_SUBSCRIPTION_II)]
        ),
    )
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_sync_subscriptions(
        self, customer_retrieve_mock, subscription_list_mock, subscription_sync_mock
    ):
        self.customer._sync_subscriptions()
        self.assertEqual(2, subscription_sync_mock.call_count)

    @patch("djstripe.models.Subscription.sync_from_stripe_data")
    @patch("stripe.Subscription.list", return_value=StripeList(data=[]))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_sync_subscriptions_none(
        self, customer_retrieve_mock, subscription_list_mock, subscription_sync_mock
    ):
        self.customer._sync_subscriptions()
        self.assertEqual(0, subscription_sync_mock.call_count)

    # --- subscriptions ---------------------------------------------------------------

    @patch("djstripe.models.Customer.send_invoice")
    @patch("stripe.Subscription.create", return_value=deepcopy(FAKE_SUBSCRIPTION))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_subscribe_not_charge_immediately(
        self, customer_retrieve_mock, subscription_create_mock, send_invoice_mock
    ):
        plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN))
        self.customer.subscribe(plan=plan, charge_immediately=False)
        self.assertFalse(send_invoice_mock.called)

    @patch("djstripe.models.Customer.send_invoice")
    @patch("stripe.Subscription.create", return_value=deepcopy(FAKE_SUBSCRIPTION))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_subscribe_charge_immediately(
        self, customer_retrieve_mock, subscription_create_mock, send_invoice_mock
    ):
        plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN))
        self.customer.subscribe(plan=plan, charge_immediately=True)
        self.assertTrue(send_invoice_mock.called)

    @patch("djstripe.models.Customer.send_invoice")
    @patch("stripe.Subscription.create", return_value=deepcopy(FAKE_SUBSCRIPTION))
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_subscribe_plan_string(
        self, customer_retrieve_mock, subscription_create_mock, send_invoice_mock
    ):
        # subscribe() also accepts a plan id string in place of a Plan instance.
        plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN))
        self.customer.subscribe(plan=plan.id, charge_immediately=True)
        self.assertTrue(send_invoice_mock.called)

    @patch("stripe.Subscription.create")
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_subscription_shortcut_with_multiple_subscriptions(
        self, customer_retrieve_mock, subscription_create_mock
    ):
        # The .subscription shortcut is ambiguous with >1 subscription and must raise.
        plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN))
        subscription_fake_duplicate = deepcopy(FAKE_SUBSCRIPTION)
        subscription_fake_duplicate["id"] = "sub_6lsC8pt7IcF8jd"
        subscription_create_mock.side_effect = [
            deepcopy(FAKE_SUBSCRIPTION),
            subscription_fake_duplicate,
        ]
        self.customer.subscribe(plan=plan, charge_immediately=False)
        self.customer.subscribe(plan=plan, charge_immediately=False)
        self.assertEqual(2, self.customer.subscriptions.count())
        with self.assertRaises(MultipleSubscriptionException):
            self.customer.subscription

    @patch("stripe.Subscription.create")
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_has_active_subscription_with_unspecified_plan_with_multiple_subscriptions(
        self, customer_retrieve_mock, subscription_create_mock
    ):
        # Without a plan argument the check is ambiguous across multiple subscriptions.
        plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN))
        subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
        subscription_fake["current_period_end"] = datetime_to_unix(
            timezone.now() + timezone.timedelta(days=7)
        )
        subscription_fake_duplicate = deepcopy(FAKE_SUBSCRIPTION)
        subscription_fake_duplicate["current_period_end"] = datetime_to_unix(
            timezone.now() + timezone.timedelta(days=7)
        )
        subscription_fake_duplicate["id"] = "sub_6lsC8pt7IcF8jd"
        subscription_create_mock.side_effect = [
            subscription_fake,
            subscription_fake_duplicate,
        ]
        self.customer.subscribe(plan=plan, charge_immediately=False)
        self.customer.subscribe(plan=plan, charge_immediately=False)
        self.assertEqual(2, self.customer.subscriptions.count())
        with self.assertRaises(TypeError):
            self.customer.has_active_subscription()

    @patch("stripe.Subscription.create")
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_has_active_subscription_with_plan(
        self, customer_retrieve_mock, subscription_create_mock
    ):
        plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN))
        subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
        subscription_fake["current_period_end"] = datetime_to_unix(
            timezone.now() + timezone.timedelta(days=7)
        )
        subscription_create_mock.return_value = subscription_fake
        self.customer.subscribe(plan=plan, charge_immediately=False)
        self.customer.has_active_subscription(plan=plan)

    @patch("stripe.Subscription.create")
    @patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
    def test_has_active_subscription_with_plan_string(
        self, customer_retrieve_mock, subscription_create_mock
    ):
        plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN))
        subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
        subscription_fake["current_period_end"] = datetime_to_unix(
            timezone.now() + timezone.timedelta(days=7)
        )
        subscription_create_mock.return_value = subscription_fake
        self.customer.subscribe(plan=plan, charge_immediately=False)
        self.customer.has_active_subscription(plan=plan.id)

    # --- invoice items ----------------------------------------------------------------

    @patch("djstripe.models.InvoiceItem.sync_from_stripe_data", return_value="pancakes")
    @patch("stripe.InvoiceItem.create", return_value=deepcopy(FAKE_INVOICEITEM))
    def test_add_invoice_item(self, invoiceitem_create_mock, invoiceitem_sync_mock):
        invoiceitem = self.customer.add_invoice_item(
            amount=decimal.Decimal("50.00"),
            currency="eur",
            description="test",
            invoice=77,
            subscription=25,
        )
        self.assertEqual("pancakes", invoiceitem)
        invoiceitem_create_mock.assert_called_once_with(
            api_key=STRIPE_SECRET_KEY,
            amount=5000,
            customer=self.customer.id,
            currency="eur",
            description="test",
            discountable=None,
            invoice=77,
            metadata=None,
            subscription=25,
        )

    @patch("djstripe.models.InvoiceItem.sync_from_stripe_data", return_value="pancakes")
    @patch("stripe.InvoiceItem.create", return_value=deepcopy(FAKE_INVOICEITEM))
    def test_add_invoice_item_djstripe_objects(
        self, invoiceitem_create_mock, invoiceitem_sync_mock
    ):
        # Model instances are unwrapped to their ids before hitting the API.
        invoiceitem = self.customer.add_invoice_item(
            amount=decimal.Decimal("50.00"),
            currency="eur",
            description="test",
            invoice=Invoice(id=77),
            subscription=Subscription(id=25),
        )
        self.assertEqual("pancakes", invoiceitem)
        invoiceitem_create_mock.assert_called_once_with(
            api_key=STRIPE_SECRET_KEY,
            amount=5000,
            customer=self.customer.id,
            currency="eur",
            description="test",
            discountable=None,
            invoice=77,
            metadata=None,
            subscription=25,
        )

    def test_add_invoice_item_bad_decimal(self):
        with self.assertRaisesMessage(
            ValueError, "You must supply a decimal value representing dollars."
        ):
            self.customer.add_invoice_item(amount=5000, currency="usd")

    @patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN))
    @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
    @patch("stripe.Invoice.upcoming", return_value=deepcopy(FAKE_UPCOMING_INVOICE))
    def test_upcoming_invoice(
        self, invoice_upcoming_mock, subscription_retrieve_mock, plan_retrieve_mock
    ):
        # Upcoming invoices are transient: no id and save() is a no-op.
        invoice = self.customer.upcoming_invoice()
        self.assertIsNotNone(invoice)
        self.assertIsNone(invoice.id)
        self.assertIsNone(invoice.save())
        subscription_retrieve_mock.assert_called_once_with(
            api_key=ANY, expand=ANY, id=FAKE_SUBSCRIPTION["id"]
        )
        plan_retrieve_mock.assert_not_called()
        items = invoice.invoiceitems.all()
        self.assertEqual(1, len(items))
        self.assertEqual(FAKE_SUBSCRIPTION["id"], items[0].id)
        self.assertIsNotNone(invoice.plan)
        self.assertEqual(FAKE_PLAN["id"], invoice.plan.id)
        # Clearing the cached items must not clear the cached plan.
        invoice._invoiceitems = []
        items = invoice.invoiceitems.all()
        self.assertEqual(0, len(items))
        self.assertIsNotNone(invoice.plan)

    @patch("stripe.Customer.retrieve")
    def test_delete_subscriber_purges_customer(self, customer_retrieve_mock):
        self.user.delete()
        customer = Customer.objects.get(id=FAKE_CUSTOMER["id"])
        self.assertIsNotNone(customer.date_purged)

    @patch("stripe.Customer.retrieve")
    def test_delete_subscriber_without_customer_is_noop(self, customer_retrieve_mock):
        self.user.delete()
        for customer in self.user.djstripe_customers.all():
            self.assertIsNone(customer.date_purged)
'''
Created on Jul 31, 2014

@author: tangliuxiang

Shell helpers for running commands locally and on an attached Android
device (via adb / su), plus AndroidFile: block-oriented (dd-based)
read/write access to files on the device.
'''
import subprocess
import tempfile
import os
import sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from formatters.log import Log

DEBUG = False
DD_BLOCK_SIZE = 512


class Shell(object):
    """ Subprocess to run shell command """

    def run(self, cmd, out=None, printout=DEBUG):
        """
        Run command in shell.

        cmd      -- shell command line to execute
        out      -- optional file-like object; each output line is written to it
        printout -- echo command output to stdout

        Returns True iff the command exited with status 0.
        """
        if printout is False:
            cmd = "%s 2>&1 > /dev/null" % cmd
        subp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        while True:
            buff = subp.stdout.readline().strip('\n')
            if printout:
                print(buff)
            if out is not None:
                out.write(buff)
            # empty line + finished process == EOF
            if buff == '' and subp.poll() != None:
                break
        # BUGFIX: was `returncode > 0`, which reported success for processes
        # killed by a signal (negative returncode).
        return subp.returncode == 0


class AdbShell(Shell):
    """ Shell that forwards every command to the Android device via adb. """
    ANDROID_TMP = "/data/local/tmp"
    TAG = "adb"

    def run(self, cmd, out=None, printout=DEBUG):
        """
        Run command on the device.

        When `out` is given (a host-side path usable by `adb pull`), the
        command's output is captured in a device-side temp file and pulled
        back to `out` afterwards.
        """
        outTmp = None
        if out is not None:
            outTmp = tempfile.mktemp(dir=AdbShell.ANDROID_TMP)
            cmd = 'echo \'%s > %s && chmod 777 %s; exit $?\'| adb shell' % (cmd, outTmp, outTmp)
        else:
            cmd = 'echo \'%s; exit $?\' | adb shell' % cmd
        ret = self.__superrun__(cmd, None, printout)
        if outTmp is not None and out is not None:
            self.pull(outTmp, out, False)
            # best-effort cleanup of the device-side capture file
            self.__superrun__("rm -r %s" % outTmp, None, False)
        return ret

    def __superrun__(self, cmd, out=None, printout=DEBUG):
        # Block until a device is attached before running anything.
        self.waitdevices()
        return super(AdbShell, self).run(cmd, out, printout)

    def push(self, inFile, outFile, printout=DEBUG):
        """Copy a host file to the device."""
        cmd = "adb push %s %s" % (inFile, outFile)
        return self.__superrun__(cmd)

    def pull(self, inFile, outFile, printout=DEBUG):
        """Copy a device file to the host."""
        cmd = "adb pull %s %s" % (inFile, outFile)
        return self.__superrun__(cmd)

    def waitdevices(self, printout=DEBUG):
        if printout:
            Log.i(AdbShell.TAG, "waiting for devices....")
        return super(AdbShell, self).run("adb wait-for-device", None, printout)


class SuShell(AdbShell):
    '''
    AdbShell variant that wraps every device command in `su -c`, for
    phones rooted through su.
    '''

    def run(self, cmd, out=None, printout=DEBUG):
        """ Run command in a root (su) shell on the device. """
        cmd = "su -c \"%s\"" % (cmd)
        return super(SuShell, self).run(cmd, out, printout)


class ShellFactory(object):
    """ Picks and caches the most capable root shell available. """
    # cached singleton shell shared by all callers
    mShell = None

    @staticmethod
    def __getRootShell__():
        # Prefer su when the external `check-su` helper reports it works.
        subp = subprocess.Popen(["check-su"], stdout=subprocess.PIPE)
        subp.communicate()
        if subp.returncode == 0:
            Log.i("AdbShell", "use su to root")
            return SuShell()
        else:
            Log.i("AdbShell", "Can not use su to root, assume your phone has already been root with modify default.prop in boot!")
            Log.i("AdbShell", "Try adb root, it may be blocked!")
            subp = subprocess.Popen(["adb", "root"], stdout=subprocess.PIPE)
            subp.communicate()
            Log.i("AdbShell", "Root successfull")
            return AdbShell()

    @staticmethod
    def getDefaultShell():
        if ShellFactory.mShell is None:
            ShellFactory.mShell = ShellFactory.__getRootShell__()
        return ShellFactory.mShell


class AndroidFile():
    """Block-oriented (dd) access to a file on the device."""

    def __init__(self, path, shell=None):
        self.mPath = path
        if shell is None:
            self.mShell = ShellFactory.getDefaultShell()
        else:
            self.mShell = shell

    def getPath(self):
        return self.mPath

    def read(self, start=0, size= -DD_BLOCK_SIZE):
        """
        Read `size` bytes at byte offset `start`. A negative size (the
        default) reads from `start`'s block to end of file. Returns the
        data as a string, or None on failure.
        """
        outStr = None
        skip = start / DD_BLOCK_SIZE
        if size > 0:
            # BUGFIX: request enough whole blocks to cover the in-block offset
            # plus `size` bytes; the old `size / 512 + 1` could under-read.
            count = (start % DD_BLOCK_SIZE + size + DD_BLOCK_SIZE - 1) / DD_BLOCK_SIZE
        else:
            count = size / DD_BLOCK_SIZE + 1  # <= 0 lets dd copy to EOF
        phoneTmp = self.__readToPhoneTmp__(skip, count)
        if phoneTmp is not None:
            pcTmp = self.__pullToPc__(phoneTmp)
            if pcTmp is not None and os.path.isfile(pcTmp):
                data = file(pcTmp).read()
                if size > 0:
                    # BUGFIX: the original seeked one file handle but then read
                    # from a second, fresh handle, so `start` was silently
                    # ignored and the end offset used a bogus modulo expression.
                    pcStart = start % DD_BLOCK_SIZE
                    outStr = data[pcStart:pcStart + size]
                else:
                    outStr = data
                os.remove(pcTmp)
            AndroidFile(phoneTmp).remove()
        return outStr

    def pull(self, dst, start=0, size= -DD_BLOCK_SIZE):
        """
        Pull whole blocks covering [start, start+size) into host file `dst`.
        NOTE(review): `dst` receives block-aligned data starting at
        start's block, not at `start` itself — confirm callers expect that.
        """
        skip = start / DD_BLOCK_SIZE
        count = size / DD_BLOCK_SIZE + 1
        phoneTmp = self.__readToPhoneTmp__(skip, count)
        if phoneTmp is not None:
            self.__pullToPc__(phoneTmp, dst)
            AndroidFile(phoneTmp).remove()
        if os.path.isfile(dst):
            return True
        return False

    def remove(self):
        """Delete the device path (recursively)."""
        return self.mShell.run("rm -r %s" % (self.mPath))

    def append(self, fstr):
        """Append the string `fstr` to the device file."""
        return self.__writeinternal__(fstr, True)

    def write(self, fstr):
        """Overwrite the device file with the string `fstr`."""
        return self.__writeinternal__(fstr, False)

    def dd_write(self, pcFile, start=0, size= -DD_BLOCK_SIZE):
        """
        dd a host file into the device file at block offset start/512.
        A non-positive (or too large) size writes the whole input file.
        """
        if size > os.path.getsize(pcFile):
            size = -DD_BLOCK_SIZE
        skip = start / DD_BLOCK_SIZE
        count = size / DD_BLOCK_SIZE + 1
        inFile = self.__pushToPhoneTmp__(pcFile)
        # BUGFIX: a failed push used to flow into a dd command with if=None.
        if inFile is None:
            return False
        if count <= 0:
            cmd = "dd if=%s of=%s seek=%s; chmod 777 %s" % (inFile, self.mPath, skip, self.mPath)
        else:
            cmd = "dd if=%s of=%s seek=%s count=%s; chmod 777 %s" % (inFile, self.mPath, skip, count, self.mPath)
        print(cmd)
        return self.mShell.run(cmd)

    def __writeToPcTmp__(self, fstr):
        # Dump the payload into a host-side temp file and return its path.
        inFilePath = tempfile.mktemp()
        inFile = file(inFilePath, "w+")
        inFile.write(fstr)
        inFile.close()
        return inFilePath

    def exist(self):
        """Return True iff the device path exists."""
        outTmp = tempfile.mktemp()
        ret = False
        if self.mShell.run(r'if [ -e %s ]; then echo True; else echo False; fi' % (self.mPath), outTmp):
            if os.path.isfile(outTmp) and file(outTmp).read().strip("\n") == "True":
                ret = True
        # guard: the capture file may not exist when the probe itself failed
        if os.path.isfile(outTmp):
            os.remove(outTmp)
        # BUGFIX: used to `return True` unconditionally, so exist() always
        # reported the path as present.
        return ret

    def __readToPhoneTmp__(self, skip, count):
        # dd the requested blocks into a device-side temp file; return its path.
        outFile = tempfile.mktemp(dir=AdbShell.ANDROID_TMP)
        if count <= 0:
            cmd = "dd if=%s of=%s skip=%s; chmod 777 %s" % (self.mPath, outFile, skip, outFile)
        else:
            cmd = "dd if=%s of=%s skip=%s count=%s; chmod 777 %s" % (self.mPath, outFile, skip, count, outFile)
        if self.mShell.run(cmd):
            return outFile
        else:
            return None

    def __pullToPc__(self, phoneTmp, pcOut=None):
        if pcOut is None:
            pcOut = tempfile.mktemp()
        self.mShell.pull(phoneTmp, pcOut)
        return pcOut

    def __pushToPhoneTmp__(self, inFilePath):
        outFile = tempfile.mktemp(dir=AdbShell.ANDROID_TMP)
        if self.mShell.push(inFilePath, outFile):
            return outFile
        else:
            return None

    def __writeinternal__(self, fstr, append=True):
        # Push the payload to the device, then cat it into (or onto) the target.
        inFilePath = self.__writeToPcTmp__(fstr)
        outFile = self.__pushToPhoneTmp__(inFilePath)
        # BUGFIX: the builtin type `str` was passed as the `out` argument,
        # which made AdbShell.run try to `adb pull` to a nonsense destination.
        if append:
            ret = self.mShell.run("cat %s >> %s" % (outFile, self.mPath))
        else:
            ret = self.mShell.run("cat %s > %s" % (outFile, self.mPath))
        AndroidFile(outFile).remove()
        os.remove(inFilePath)
        return ret
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Image assessment algorithms. Typical overlap and error computation
measures to evaluate results from other processing units.

    Change directory to provide relative paths for doctests
    >>> import os
    >>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
    >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
    >>> os.chdir(datadir)
'''

import os
import os.path as op

import nibabel as nb
import numpy as np
from math import floor, ceil
from scipy.ndimage.morphology import grey_dilation
from scipy.ndimage.morphology import binary_erosion
from scipy.spatial.distance import cdist, euclidean, dice, jaccard
from scipy.ndimage.measurements import center_of_mass, label
from scipy.special import legendre
import scipy.io as sio
import itertools
import scipy.stats as stats

from .. import logging
from ..utils.misc import package_check

from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File,
                               InputMultiPath, OutputMultiPath,
                               BaseInterfaceInputSpec, isdefined)
from ..utils.filemanip import fname_presuffix, split_filename

iflogger = logging.getLogger('interface')


class DistanceInputSpec(BaseInterfaceInputSpec):
    # Both volumes must share dimensions; method selects the distance measure.
    volume1 = File(exists=True, mandatory=True,
                   desc="Has to have the same dimensions as volume2.")
    volume2 = File(
        exists=True, mandatory=True,
        desc="Has to have the same dimensions as volume1."
    )
    method = traits.Enum(
        "eucl_min", "eucl_cog", "eucl_mean", "eucl_wmean", "eucl_max",
        desc='""eucl_min": Euclidean distance between two closest points\
        "eucl_cog": mean Euclidian distance between the Center of Gravity\
        of volume1 and CoGs of volume2\
        "eucl_mean": mean Euclidian minimum distance of all volume2 voxels\
        to volume1\
        "eucl_wmean": mean Euclidian minimum distance of all volume2 voxels\
        to volume1 weighted by their values\
        "eucl_max": maximum over minimum Euclidian distances of all volume2\
        voxels to volume1 (also known as the Hausdorff distance)',
        usedefault=True
    )
    mask_volume = File(
        exists=True, desc="calculate overlap only within this mask.")


class DistanceOutputSpec(TraitedSpec):
    distance = traits.Float()
    # point1/point2 are only populated for the "eucl_min" method.
    point1 = traits.Array(shape=(3,))
    point2 = traits.Array(shape=(3,))
    # histogram is only produced for "eucl_mean"/"eucl_wmean".
    histogram = File()


class Distance(BaseInterface):
    """Calculates distance between two volumes.
    """
    input_spec = DistanceInputSpec
    output_spec = DistanceOutputSpec

    # Filename of the histogram PDF written by _eucl_mean.
    _hist_filename = "hist.pdf"

    def _find_border(self, data):
        """Return the one-voxel-thick boundary of a binary mask.

        NOTE(review): np.bool and the nibabel get_data()/get_affine() calls
        used throughout this class are deprecated in modern numpy/nibabel.
        """
        eroded = binary_erosion(data)
        border = np.logical_and(data, np.logical_not(eroded))
        return border

    def _get_coordinates(self, data, affine):
        """Map the nonzero voxel indices of `data` to world coordinates.

        4D inputs are reduced to their first volume. Returns a 3xN array.
        """
        if len(data.shape) == 4:
            data = data[:, :, :, 0]
        indices = np.vstack(np.nonzero(data))
        # Append a row of ones so the 4x4 affine can be applied homogeneously.
        indices = np.vstack((indices, np.ones(indices.shape[1])))
        coordinates = np.dot(affine, indices)
        return coordinates[:3, :]

    def _eucl_min(self, nii1, nii2):
        """Euclidean distance between the two closest border points.

        Returns (distance, point1, point2) in world coordinates.
        """
        origdata1 = nii1.get_data().astype(np.bool)
        border1 = self._find_border(origdata1)
        origdata2 = nii2.get_data().astype(np.bool)
        border2 = self._find_border(origdata2)
        set1_coordinates = self._get_coordinates(border1, nii1.get_affine())
        set2_coordinates = self._get_coordinates(border2, nii2.get_affine())
        dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T)
        (point1, point2) = np.unravel_index(
            np.argmin(dist_matrix), dist_matrix.shape)
        return (euclidean(set1_coordinates.T[point1, :],
                          set2_coordinates.T[point2, :]),
                set1_coordinates.T[point1, :],
                set2_coordinates.T[point2, :])

    def _eucl_cog(self, nii1, nii2):
        """Mean distance between volume1's center of gravity and the CoG of
        each connected component in volume2."""
        origdata1 = nii1.get_data().astype(np.bool)
        cog_t = np.array(center_of_mass(origdata1)).reshape(-1, 1)
        cog_t = np.vstack((cog_t, np.array([1])))  # homogeneous coordinate
        cog_t_coor = np.dot(nii1.get_affine(), cog_t)[:3, :]
        origdata2 = nii2.get_data().astype(np.bool)
        (labeled_data, n_labels) = label(origdata2)
        cogs = np.ones((4, n_labels))
        for i in range(n_labels):
            cogs[:3, i] = np.array(center_of_mass(origdata2,
                                                  labeled_data, i + 1))
        cogs_coor = np.dot(nii2.get_affine(), cogs)[:3, :]
        dist_matrix = cdist(cog_t_coor.T, cogs_coor.T)
        return np.mean(dist_matrix)

    def _eucl_mean(self, nii1, nii2, weighted=False):
        """Mean minimum distance of every volume2 voxel to volume1's border.

        Also writes a histogram of the minimum distances to self._hist_filename.
        If `weighted`, the mean is weighted by volume2's voxel values.
        """
        origdata1 = nii1.get_data().astype(np.bool)
        border1 = self._find_border(origdata1)
        origdata2 = nii2.get_data().astype(np.bool)
        set1_coordinates = self._get_coordinates(border1, nii1.get_affine())
        set2_coordinates = self._get_coordinates(origdata2, nii2.get_affine())
        dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T)
        min_dist_matrix = np.amin(dist_matrix, axis=0)
        # Imported lazily so the module loads without matplotlib installed.
        import matplotlib.pyplot as plt
        plt.figure()
        # NOTE(review): `normed=1` is removed in modern matplotlib (use density).
        plt.hist(min_dist_matrix, 50, normed=1, facecolor='green')
        plt.savefig(self._hist_filename)
        plt.clf()
        plt.close()

        if weighted:
            return np.average(
                min_dist_matrix,
                weights=nii2.get_data()[origdata2].flat
            )
        else:
            return np.mean(min_dist_matrix)

    def _eucl_max(self, nii1, nii2):
        """Hausdorff distance: max over the minimum border-to-border distances.

        Returns NaN when either volume is empty (after optional masking).
        """
        origdata1 = nii1.get_data()
        # Treat zeros and NaNs as background.
        origdata1 = np.logical_not(
            np.logical_or(origdata1 == 0, np.isnan(origdata1)))
        origdata2 = nii2.get_data()
        origdata2 = np.logical_not(
            np.logical_or(origdata2 == 0, np.isnan(origdata2)))

        if isdefined(self.inputs.mask_volume):
            maskdata = nb.load(self.inputs.mask_volume).get_data()
            maskdata = np.logical_not(
                np.logical_or(maskdata == 0, np.isnan(maskdata)))
            origdata1 = np.logical_and(maskdata, origdata1)
            origdata2 = np.logical_and(maskdata, origdata2)

        if origdata1.max() == 0 or origdata2.max() == 0:
            return np.NaN

        border1 = self._find_border(origdata1)
        border2 = self._find_border(origdata2)

        set1_coordinates = self._get_coordinates(border1, nii1.get_affine())
        set2_coordinates = self._get_coordinates(border2, nii2.get_affine())
        distances = cdist(set1_coordinates.T, set2_coordinates.T)
        # Directed minima in both directions, then the overall maximum.
        mins = np.concatenate(
            (np.amin(distances, axis=0), np.amin(distances, axis=1)))
        return np.max(mins)

    def _run_interface(self, runtime):
        """Dispatch to the distance method selected by self.inputs.method."""
        nii1 = nb.load(self.inputs.volume1)
        nii2 = nb.load(self.inputs.volume2)

        if self.inputs.method == "eucl_min":
            self._distance, self._point1, self._point2 = self._eucl_min(
                nii1, nii2)
        elif self.inputs.method == "eucl_cog":
            self._distance = self._eucl_cog(nii1, nii2)
        elif self.inputs.method == "eucl_mean":
            self._distance = self._eucl_mean(nii1, nii2)
        elif self.inputs.method == "eucl_wmean":
            self._distance = self._eucl_mean(nii1, nii2, weighted=True)
        elif self.inputs.method == "eucl_max":
            self._distance = self._eucl_max(nii1, nii2)

        return runtime

    def _list_outputs(self):
        """Expose distance plus method-specific extras (points / histogram)."""
        outputs = self._outputs().get()
        outputs['distance'] = self._distance
        if self.inputs.method == "eucl_min":
            outputs['point1'] = self._point1
            outputs['point2'] = self._point2
        elif self.inputs.method in ["eucl_mean", "eucl_wmean"]:
            outputs['histogram'] = os.path.abspath(self._hist_filename)
        return outputs


class OverlapInputSpec(BaseInterfaceInputSpec):
    volume1 = File(exists=True, mandatory=True,
                   desc='Has to have the same dimensions as volume2.')
    volume2 = File(exists=True, mandatory=True,
                   desc='Has to have the same dimensions as volume1.')
    mask_volume = File(exists=True,
                       desc='calculate overlap only within this mask.')
    bg_overlap = traits.Bool(False, usedefault=True, mandatory=True,
                             desc='consider zeros as a label')
    out_file = File('diff.nii', usedefault=True)
    weighting = traits.Enum('none', 'volume', 'squared_vol', usedefault=True,
                            desc=('\'none\': no class-overlap weighting is '
                                  'performed. \'volume\': computed class-'
                                  'overlaps are weighted by class volume '
                                  '\'squared_vol\': computed class-overlaps '
                                  'are weighted by the squared volume of '
                                  'the class'))
    vol_units = traits.Enum('voxel', 'mm', mandatory=True, usedefault=True,
                            desc='units for volumes')


class OverlapOutputSpec(TraitedSpec):
    jaccard = traits.Float(desc='averaged jaccard index')
    dice = traits.Float(desc='averaged dice index')
    roi_ji = traits.List(traits.Float(),
                         desc=('the Jaccard index (JI) per ROI'))
    roi_di = traits.List(traits.Float(),
                         desc=('the Dice index (DI) per ROI'))
    volume_difference = traits.Float(desc=('averaged volume difference'))
    roi_voldiff = traits.List(traits.Float(),
                              desc=('volume differences of ROIs'))
    labels = traits.List(traits.Int(),
                         desc=('detected labels'))
    diff_file = File(exists=True, desc='error map of differences')


class Overlap(BaseInterface):
    """
    Calculates Dice and Jaccard's overlap measures between two ROI maps.
    The interface is backwards compatible with the former version in
    which only binary files were accepted.

    The averaged values of overlap indices can be weighted. Volumes
    now can be reported in :math:`mm^3`, although they are given in voxels
    to keep backwards compatibility.

    Example
    -------

    >>> overlap = Overlap()
    >>> overlap.inputs.volume1 = 'cont1.nii'
    >>> overlap.inputs.volume2 = 'cont2.nii'
    >>> res = overlap.run() # doctest: +SKIP
    """
    input_spec = OverlapInputSpec
    output_spec = OverlapOutputSpec

    def _bool_vec_dissimilarity(self, booldata1, booldata2, method):
        """1 - (dice|jaccard) over flattened boolean arrays; 0 if both empty."""
        methods = {'dice': dice, 'jaccard': jaccard}
        if not (np.any(booldata1) or np.any(booldata2)):
            return 0
        return 1 - methods[method](booldata1.flat, booldata2.flat)

    def _run_interface(self, runtime):
        """Compute per-label overlaps and their weighted averages."""
        nii1 = nb.load(self.inputs.volume1)
        nii2 = nb.load(self.inputs.volume2)

        # scale converts a voxel count into the requested volume units.
        scale = 1.0

        if self.inputs.vol_units == 'mm':
            voxvol = nii1.get_header().get_zooms()
            for i in xrange(nii1.get_data().ndim-1):  # Python 2 xrange
                scale = scale * voxvol[i]

        data1 = nii1.get_data()
        data1[np.logical_or(data1 < 0, np.isnan(data1))] = 0
        max1 = int(data1.max())
        data1 = data1.astype(np.min_scalar_type(max1))
        data2 = nii2.get_data().astype(np.min_scalar_type(max1))
        # NOTE(review): data2 is zeroed using data1's mask here (data1 < 0 /
        # isnan(data1)), not data2's own values -- confirm this is intended.
        data2[np.logical_or(data1 < 0, np.isnan(data1))] = 0
        max2 = data2.max()
        maxlabel = max(max1, max2)

        if isdefined(self.inputs.mask_volume):
            maskdata = nb.load(self.inputs.mask_volume).get_data()
            maskdata = ~np.logical_or(maskdata == 0, np.isnan(maskdata))
            data1[~maskdata] = 0
            data2[~maskdata] = 0

        res = []
        volumes1 = []
        volumes2 = []

        # Labels come from volume1; background (0) only if bg_overlap is set.
        labels = np.unique(data1[data1 > 0].reshape(-1)).tolist()
        if self.inputs.bg_overlap:
            labels.insert(0, 0)

        for l in labels:
            res.append(self._bool_vec_dissimilarity(data1 == l,
                                                    data2 == l,
                                                    method='jaccard'))
            volumes1.append(scale * len(data1[data1 == l]))
            volumes2.append(scale * len(data2[data2 == l]))

        results = dict(jaccard=[], dice=[])
        results['jaccard'] = np.array(res)
        # Dice derived algebraically from Jaccard: D = 2J / (J + 1).
        results['dice'] = 2.0*results['jaccard'] / (results['jaccard'] + 1.0)

        weights = np.ones((len(volumes1),), dtype=np.float32)
        if self.inputs.weighting != 'none':
            weights = weights / np.array(volumes1)
            if self.inputs.weighting == 'squared_vol':
                weights = weights**2
        weights = weights / np.sum(weights)

        # Binary difference map: 1 wherever the label maps disagree.
        both_data = np.zeros(data1.shape)
        both_data[(data1 - data2) != 0] = 1

        nb.save(nb.Nifti1Image(both_data, nii1.get_affine(),
                               nii1.get_header()), self.inputs.out_file)

        self._labels = labels
        self._ove_rois = results
        self._vol_rois = ((np.array(volumes1) -
                           np.array(volumes2)) / np.array(volumes1))

        self._dice = round(np.sum(weights*results['dice']), 5)
        self._jaccard = round(np.sum(weights*results['jaccard']), 5)
        self._volume = np.sum(weights*self._vol_rois)
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['labels'] = self._labels
        outputs['jaccard'] = self._jaccard
        outputs['dice'] = self._dice
        outputs['volume_difference'] = self._volume
        outputs['roi_ji'] = self._ove_rois['jaccard'].tolist()
        outputs['roi_di'] = self._ove_rois['dice'].tolist()
        outputs['roi_voldiff'] = self._vol_rois.tolist()
        outputs['diff_file'] = os.path.abspath(self.inputs.out_file)
        return outputs


class FuzzyOverlapInputSpec(BaseInterfaceInputSpec):
    in_ref = InputMultiPath(File(exists=True), mandatory=True,
                            desc='Reference image. Requires the same dimensions as in_tst.')
    in_tst = InputMultiPath(File(exists=True), mandatory=True,
                            desc='Test image. Requires the same dimensions as in_ref.')
    weighting = traits.Enum('none', 'volume', 'squared_vol', usedefault=True,
                            desc=('\'none\': no class-overlap weighting is '
                                  'performed. \'volume\': computed class-'
                                  'overlaps are weighted by class volume '
                                  '\'squared_vol\': computed class-overlaps '
                                  'are weighted by the squared volume of '
                                  'the class'))
    out_file = File('diff.nii', desc='alternative name for resulting difference-map',
                    usedefault=True)


class FuzzyOverlapOutputSpec(TraitedSpec):
    jaccard = traits.Float( desc='Fuzzy Jaccard Index (fJI), all the classes' )
    dice = traits.Float( desc='Fuzzy Dice Index (fDI), all the classes' )
    diff_file = File(exists=True, desc='resulting difference-map of all classes, using the chosen weighting' )
    class_fji = traits.List( traits.Float(), desc='Array containing the fJIs of each computed class' )
    class_fdi = traits.List( traits.Float(), desc='Array containing the fDIs of each computed class' )


class FuzzyOverlap(BaseInterface):
    """Calculates various overlap measures between two maps, using the fuzzy
    definition proposed in: Crum et al., Generalized Overlap Measures for
    Evaluation and Validation in Medical Image Analysis, IEEE Trans. Med.
    Ima. 25(11),pp 1451-1461, Nov. 2006.

    in_ref and in_tst are lists of 2/3D images, each element on the list
    containing one volume fraction map of a class in a fuzzy partition
    of the domain.

    Example
    -------

    >>> overlap = FuzzyOverlap()
    >>> overlap.inputs.in_ref = [ 'ref_class0.nii', 'ref_class1.nii' ]
    >>> overlap.inputs.in_tst = [ 'tst_class0.nii', 'tst_class1.nii' ]
    >>> overlap.inputs.weighting = 'volume'
    >>> res = overlap.run() # doctest: +SKIP
    """
    input_spec = FuzzyOverlapInputSpec
    output_spec = FuzzyOverlapOutputSpec

    def _run_interface(self, runtime):
        """Compute fuzzy Jaccard/Dice per class plus a weighted difference map."""
        ncomp = len(self.inputs.in_ref)
        assert( ncomp == len(self.inputs.in_tst) )
        weights = np.ones( shape=ncomp )

        # Stack one volume per class: axis 0 indexes the class.
        img_ref = np.array( [ nb.load( fname ).get_data() for fname in self.inputs.in_ref ] )
        img_tst = np.array( [ nb.load( fname ).get_data() for fname in self.inputs.in_tst ] )

        msk = np.sum(img_ref, axis=0)
        msk[msk>0] = 1.0
        tst_msk = np.sum(img_tst, axis=0)
        tst_msk[tst_msk>0] = 1.0

        #check that volumes are normalized
        #img_ref[:][msk>0] = img_ref[:][msk>0] / (np.sum( img_ref, axis=0 ))[msk>0]
        #img_tst[tst_msk>0] = img_tst[tst_msk>0] / np.sum( img_tst, axis=0 )[tst_msk>0]

        self._jaccards = []
        volumes = []

        diff_im = np.zeros( img_ref.shape )

        for ref_comp, tst_comp, diff_comp in zip( img_ref, img_tst, diff_im ):
            # Fuzzy intersection = voxelwise min; fuzzy union = voxelwise max.
            num = np.minimum( ref_comp, tst_comp )
            ddr = np.maximum( ref_comp, tst_comp )
            diff_comp[ddr>0]+= 1.0-(num[ddr>0]/ddr[ddr>0])
            self._jaccards.append( np.sum( num ) / np.sum( ddr ) )
            volumes.append( np.sum( ref_comp ) )

        self._dices = 2.0*np.array(self._jaccards) / (np.array(self._jaccards) +1.0 )

        if self.inputs.weighting != "none":
            weights = 1.0 / np.array(volumes)
            if self.inputs.weighting == "squared_vol":
                weights = weights**2

        weights = weights / np.sum( weights )

        setattr( self, '_jaccard', np.sum( weights * self._jaccards ) )
        setattr( self, '_dice', np.sum( weights * self._dices ) )

        # Combine the per-class difference maps with the chosen weights,
        # restricted to the reference foreground mask.
        diff = np.zeros( diff_im[0].shape )

        for w,ch in zip(weights,diff_im):
            ch[msk==0] = 0
            diff+= w* ch

        nb.save(nb.Nifti1Image(diff, nb.load( self.inputs.in_ref[0]).get_affine(),
                               nb.load(self.inputs.in_ref[0]).get_header()),
                self.inputs.out_file)

        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        for method in ("dice", "jaccard"):
            outputs[method] = getattr(self, '_' + method)
        #outputs['volume_difference'] = self._volume
        outputs['diff_file'] = os.path.abspath(self.inputs.out_file)
        outputs['class_fji'] = np.array(self._jaccards).astype(float).tolist();
        outputs['class_fdi']= self._dices.astype(float).tolist();
        return outputs


class ErrorMapInputSpec(BaseInterfaceInputSpec):
    in_ref = File(exists=True, mandatory=True,
                  desc="Reference image. Requires the same dimensions as in_tst.")
    in_tst = File(exists=True, mandatory=True,
                  desc="Test image. Requires the same dimensions as in_ref.")
    mask = File(exists=True, desc="calculate overlap only within this mask.")
    metric = traits.Enum("sqeuclidean", "euclidean",
                         desc='error map metric (as implemented in scipy cdist)',
                         usedefault=True, mandatory=True)
    out_map = File(desc="Name for the output file")


class ErrorMapOutputSpec(TraitedSpec):
    out_map = File(exists=True, desc="resulting error map")
    distance = traits.Float(desc="Average distance between volume 1 and 2")


class ErrorMap(BaseInterface):
    """ Calculates the error (distance) map between two input volumes.

    Example
    -------

    >>> errormap = ErrorMap()
    >>> errormap.inputs.in_ref = 'cont1.nii'
    >>> errormap.inputs.in_tst = 'cont2.nii'
    >>> res = errormap.run() # doctest: +SKIP
    """
    input_spec = ErrorMapInputSpec
    output_spec = ErrorMapOutputSpec
    _out_file = ''

    def _run_interface(self, runtime):
        """Compute a per-voxel error map and the masked average distance."""
        # Get two numpy data matrices
        nii_ref = nb.load(self.inputs.in_ref)
        ref_data = np.squeeze(nii_ref.get_data())
        tst_data = np.squeeze(nb.load(self.inputs.in_tst).get_data())
        assert(ref_data.ndim == tst_data.ndim)

        # Load mask
        comps = 1
        mapshape = ref_data.shape

        if (ref_data.ndim == 4):
            # 4D input: the last axis holds per-voxel components.
            comps = ref_data.shape[-1]
            mapshape = ref_data.shape[:-1]

        if isdefined(self.inputs.mask):
            msk = nb.load( self.inputs.mask ).get_data()
            if (mapshape != msk.shape):
                raise RuntimeError("Mask should match volume shape, \
mask is %s and volumes are %s" %
                                   (list(msk.shape), list(mapshape)))
        else:
            msk = np.ones(shape=mapshape)

        # Flatten both volumes and make the pixel differennce
        mskvector = msk.reshape(-1)
        msk_idxs = np.where(mskvector==1)
        refvector = ref_data.reshape(-1,comps)[msk_idxs].astype(np.float32)
        tstvector = tst_data.reshape(-1,comps)[msk_idxs].astype(np.float32)
        diffvector = (refvector-tstvector)

        # Scale the difference
        if self.inputs.metric == 'sqeuclidean':
            errvector = diffvector**2
            if (comps > 1):
                errvector = np.sum(errvector, axis=1)
            else:
                errvector = np.squeeze(errvector)
        elif self.inputs.metric == 'euclidean':
            errvector = np.linalg.norm(diffvector, axis=1)

        errvectorexp = np.zeros_like(mskvector, dtype=np.float32)  # The default type is uint8
        errvectorexp[msk_idxs] = errvector

        # Get averaged error
        self._distance = np.average(errvector)  # Only average the masked voxels

        errmap = errvectorexp.reshape(mapshape)

        hdr = nii_ref.get_header().copy()
        hdr.set_data_dtype(np.float32)
        hdr['data_type'] = 16
        hdr.set_data_shape(mapshape)

        if not isdefined(self.inputs.out_map):
            # Derive the output name from the test image, handling .nii.gz.
            fname,ext = op.splitext(op.basename(self.inputs.in_tst))
            if ext=='.gz':
                fname,ext2 = op.splitext(fname)
                ext = ext2 + ext
            self._out_file = op.abspath(fname + "_errmap" + ext)
        else:
            self._out_file = self.inputs.out_map

        nb.Nifti1Image(errmap.astype(np.float32), nii_ref.get_affine(),
                       hdr).to_filename(self._out_file)

        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['out_map'] = self._out_file
        outputs['distance'] = self._distance
        return outputs


class SimilarityInputSpec(BaseInterfaceInputSpec):
    volume1 = File(exists=True, desc="3D/4D volume", mandatory=True)
    volume2 = File(exists=True, desc="3D/4D volume", mandatory=True)
    mask1 = File(exists=True, desc="3D volume")
    mask2 = File(exists=True, desc="3D volume")
    metric = traits.Either(traits.Enum('cc', 'cr', 'crl1', 'mi', 'nmi', 'slr'),
                           traits.Callable(),
                           desc="""str or callable
Cost-function for assessing image similarity. If a string,
one of 'cc': correlation coefficient, 'cr': correlation
ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual
information, 'nmi': normalized mutual information, 'slr':
supervised log-likelihood ratio. If a callable, it should
take a two-dimensional array representing the image joint
histogram as an input and return a float.""", usedefault=True)


class SimilarityOutputSpec(TraitedSpec):
    similarity = traits.List(
        traits.Float(desc="Similarity between volume 1 and 2, frame by frame"))


class Similarity(BaseInterface):
    """Calculates similarity between two 3D or 4D volumes. Both volumes have to be in
    the same coordinate system, same space within that coordinate system and
    with the same voxel dimensions.

    .. note:: This interface is an extension of
              :py:class:`nipype.interfaces.nipy.utils.Similarity` to support 4D files.
              Requires :py:mod:`nipy`

    Example
    -------
    >>> from nipype.algorithms.metrics import Similarity
    >>> similarity = Similarity()
    >>> similarity.inputs.volume1 = 'rc1s1.nii'
    >>> similarity.inputs.volume2 = 'rc1s2.nii'
    >>> similarity.inputs.mask1 = 'mask.nii'
    >>> similarity.inputs.mask2 = 'mask.nii'
    >>> similarity.inputs.metric = 'cr'
    >>> res = similarity.run() # doctest: +SKIP
    """

    input_spec = SimilarityInputSpec
    output_spec = SimilarityOutputSpec
    _have_nipy = True

    def __init__(self, **inputs):
        # Python 2 except syntax; probes for nipy once at construction time.
        try:
            package_check('nipy')
        except Exception, e:
            self._have_nipy = False
        super(Similarity,self).__init__(**inputs)

    def _run_interface(self, runtime):
        """Evaluate the similarity metric frame by frame (3D pairs)."""
        if not self._have_nipy:
            raise RuntimeError('nipy is not installed')
        from nipy.algorithms.registration.histogram_registration import HistogramRegistration
        from nipy.algorithms.registration.affine import Affine

        vol1_nii = nb.load(self.inputs.volume1)
        vol2_nii = nb.load(self.inputs.volume2)

        dims = vol1_nii.get_data().ndim

        if dims==3 or dims==2:
            vols1 = [ vol1_nii ]
            vols2 = [ vol2_nii ]
        if dims==4:
            vols1 = nb.four_to_three( vol1_nii )
            vols2 = nb.four_to_three( vol2_nii )

        if dims<2 or dims>4:
            raise RuntimeError( 'Image dimensions not supported (detected %dD file)' % dims )

        if isdefined(self.inputs.mask1):
            mask1 = nb.load(self.inputs.mask1).get_data() == 1
        else:
            mask1 = None

        if isdefined(self.inputs.mask2):
            mask2 = nb.load(self.inputs.mask2).get_data() == 1
        else:
            mask2 = None

        self._similarity = []

        for ts1,ts2 in zip( vols1, vols2 ):
            # Identity transform: score similarity without registering.
            histreg = HistogramRegistration(from_img = ts1,
                                            to_img = ts2,
                                            similarity=self.inputs.metric,
                                            from_mask = mask1,
                                            to_mask = mask2)
            self._similarity.append( histreg.eval(Affine()) )

        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['similarity'] = self._similarity
        return outputs
# Copyright 2015 NEC Corporation.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy

from oslotest import mockpatch
from tempest_lib import exceptions as lib_exc

from tempest_lib.services.compute import images_client
from tempest_lib.tests import fake_auth_provider
from tempest_lib.tests.services.compute import base


class TestImagesClient(base.BaseComputeServiceTest):
    """Unit tests for the compute ImagesClient.

    Each test mocks one RestClient HTTP verb and checks that the client
    method issues the expected call and parses the canned response.
    """
    # Data Dictionaries used for testing #

    # Canned metadata responses keyed by the metadata operation under test.
    FAKE_IMAGE_METADATA = {
        "list": {"metadata": {
            "auto_disk_config": "True",
            "Label": "Changed"
        }},
        "set_item": {"meta": {
            "auto_disk_config": "True"
        }},
        "show_item": {"meta": {
            "kernel_id": "nokernel",
        }},
        "update": {"metadata": {
            "kernel_id": "False",
            "Label": "UpdatedImage"
        }},
        "set": {"metadata": {
            "Label": "Changed",
            "auto_disk_config": "True"
        }},
        "delete_item": {}
    }

    # Canned image responses keyed by the image operation under test.
    FAKE_IMAGE_DATA = {
        "list": {"images": [
            {"id": "70a599e0-31e7-49b7-b260-868f441e862b",
             "links": [
                 {"href": "http://openstack.example.com/v2/openstack" +
                          "/images/70a599e0-31e7-49b7-b260-868f441e862b",
                  "rel": "self"
                  }
             ],
             "name": "fakeimage7"
             }]},
        "show": {"image": {
            "created": "2011-01-01T01:02:03Z",
            "id": "70a599e0-31e7-49b7-b260-868f441e862b",
            "links": [
                {
                    "href": "http://openstack.example.com/v2/openstack" +
                            "/images/70a599e0-31e7-49b7-b260-868f441e862b",
                    "rel": "self"
                },
            ],
            "metadata": {
                "architecture": "x86_64",
                "auto_disk_config": "True",
                "kernel_id": "nokernel",
                "ramdisk_id": "nokernel"
            },
            "minDisk": 0,
            "minRam": 0,
            "name": "fakeimage7",
            "progress": 100,
            "status": "ACTIVE",
            "updated": "2011-01-01T01:02:03Z"}},
        "create": {},
        "delete": {}
    }

    # Maps an HTTP verb to the RestClient method to mock.
    func2mock = {
        'get': 'tempest_lib.common.rest_client.RestClient.get',
        'post': 'tempest_lib.common.rest_client.RestClient.post',
        'put': 'tempest_lib.common.rest_client.RestClient.put',
        'delete': 'tempest_lib.common.rest_client.RestClient.delete'}

    # Variable definition
    FAKE_IMAGE_ID = FAKE_IMAGE_DATA['show']['image']['id']
    FAKE_SERVER_ID = "80a599e0-31e7-49b7-b260-868f441e343f"
    FAKE_CREATE_INFO = {'location': 'None'}
    FAKE_METADATA = FAKE_IMAGE_METADATA['show_item']['meta']

    def setUp(self):
        super(TestImagesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = images_client.ImagesClient(fake_auth,
                                                 "compute", "regionOne")

    def _test_image_operation(self, operation="delete", bytes_body=False):
        """Drive one image operation (list/show/create/delete) through the
        mocked RestClient and verify the parsed response."""
        response_code = 200
        mock_operation = self.func2mock['get']
        expected_op = self.FAKE_IMAGE_DATA[operation]
        params = {"image_id": self.FAKE_IMAGE_ID}
        headers = None

        if operation == 'list':
            function = self.client.list_images
        elif operation == 'show':
            function = self.client.show_image
        elif operation == 'create':
            function = self.client.create_image
            mock_operation = self.func2mock['post']
            params = {"server_id": self.FAKE_SERVER_ID}
            # create_image returns 202 with a Location header.
            response_code = 202
            headers = {
                'connection': 'keep-alive',
                'content-length': '0',
                'content-type': 'application/json',
                'status': '202',
                'x-compute-request-id': 'req-fake',
                'vary': 'accept-encoding',
                'x-openstack-nova-api-version': 'v2.1',
                'date': '13 Oct 2015 05:55:36 GMT',
                'location': 'http://fake.com/images/fake'
            }
        else:
            function = self.client.delete_image
            mock_operation = self.func2mock['delete']
            response_code = 204

        self.check_service_client_function(
            function, mock_operation, expected_op,
            bytes_body, response_code, headers, **params)

    def _test_image_metadata(self, operation="set_item", bytes_body=False):
        """Drive one image-metadata operation through the mocked RestClient.

        The default operation (no match below) is set_image_metadata_item.
        """
        response_code = 200
        expected_op = self.FAKE_IMAGE_METADATA[operation]
        if operation == 'list':
            function = self.client.list_image_metadata
            mock_operation = self.func2mock['get']
            params = {"image_id": self.FAKE_IMAGE_ID}

        elif operation == 'set':
            function = self.client.set_image_metadata
            mock_operation = self.func2mock['put']
            params = {"image_id": "_dummy_data",
                      "meta": self.FAKE_METADATA}

        elif operation == 'update':
            function = self.client.update_image_metadata
            mock_operation = self.func2mock['post']
            params = {"image_id": self.FAKE_IMAGE_ID,
                      "meta": self.FAKE_METADATA}

        elif operation == 'show_item':
            mock_operation = self.func2mock['get']
            function = self.client.show_image_metadata_item
            params = {"image_id": self.FAKE_IMAGE_ID,
                      "key": "123"}

        elif operation == 'delete_item':
            function = self.client.delete_image_metadata_item
            mock_operation = self.func2mock['delete']
            response_code = 204
            params = {"image_id": self.FAKE_IMAGE_ID,
                      "key": "123"}

        else:
            function = self.client.set_image_metadata_item
            mock_operation = self.func2mock['put']
            params = {"image_id": self.FAKE_IMAGE_ID,
                      "key": "123",
                      "meta": self.FAKE_METADATA}

        self.check_service_client_function(
            function, mock_operation, expected_op,
            bytes_body, response_code, **params)

    def _test_resource_deleted(self, bytes_body=False):
        """is_resource_deleted: True when show_image raises NotFound,
        False when the image is still returned."""
        params = {"id": self.FAKE_IMAGE_ID}
        expected_op = self.FAKE_IMAGE_DATA['show']['image']
        self.useFixture(mockpatch.Patch('tempest_lib.services.compute'
                        '.images_client.ImagesClient.show_image',
                                        side_effect=lib_exc.NotFound))
        self.assertEqual(True, self.client.is_resource_deleted(**params))
        # NOTE(review): tempdata is built but never passed to the patch below
        # (which returns expected_op instead) -- looks unused; confirm intent.
        tempdata = copy.deepcopy(self.FAKE_IMAGE_DATA['show'])
        tempdata['image']['id'] = None
        self.useFixture(mockpatch.Patch('tempest_lib.services.compute'
                        '.images_client.ImagesClient.show_image',
                                        return_value=expected_op))
        self.assertEqual(False, self.client.is_resource_deleted(**params))

    def test_list_images_with_str_body(self):
        self._test_image_operation('list')

    def test_list_images_with_bytes_body(self):
        self._test_image_operation('list', True)

    def test_show_image_with_str_body(self):
        self._test_image_operation('show')

    def test_show_image_with_bytes_body(self):
        self._test_image_operation('show', True)

    def test_create_image_with_str_body(self):
        self._test_image_operation('create')

    def test_create_image_with_bytes_body(self):
        self._test_image_operation('create', True)

    def test_delete_image_with_str_body(self):
        self._test_image_operation('delete')

    def test_delete_image_with_bytes_body(self):
        self._test_image_operation('delete', True)

    def test_list_image_metadata_with_str_body(self):
        self._test_image_metadata('list')

    def test_list_image_metadata_with_bytes_body(self):
        self._test_image_metadata('list', True)

    def test_set_image_metadata_with_str_body(self):
        self._test_image_metadata('set')

    def test_set_image_metadata_with_bytes_body(self):
        self._test_image_metadata('set', True)

    def test_update_image_metadata_with_str_body(self):
        self._test_image_metadata('update')

    def test_update_image_metadata_with_bytes_body(self):
        self._test_image_metadata('update', True)

    def test_set_image_metadata_item_with_str_body(self):
        self._test_image_metadata()

    def test_set_image_metadata_item_with_bytes_body(self):
        self._test_image_metadata(bytes_body=True)

    def test_show_image_metadata_item_with_str_body(self):
        self._test_image_metadata('show_item')

    def test_show_image_metadata_item_with_bytes_body(self):
        self._test_image_metadata('show_item', True)

    def test_delete_image_metadata_item_with_str_body(self):
        self._test_image_metadata('delete_item')

    def test_delete_image_metadata_item_with_bytes_body(self):
        self._test_image_metadata('delete_item', True)

    def test_resource_delete_with_str_body(self):
        self._test_resource_deleted()

    def test_resource_delete_with_bytes_body(self):
        self._test_resource_deleted(True)
import sqlite3

import flask
from flask import jsonify

from library.app import app
import library.database as database
import library.session as session


class SiteNotFound(Exception):
    """Raised when a site_id does not exist in the database."""


class RoomNotFound(Exception):
    """Raised when a room_id does not exist in the database."""


def _serialize_sites(rooms):
    """Group rows from ``sites LEFT JOIN rooms`` into a list of site dicts.

    Each site appears once, carrying an (possibly empty) ``rooms`` list.
    A site without rooms yields a joined row whose ``room_id`` is NULL.
    """
    site_indexes = {}
    sites = []
    for room in rooms:
        site_id = room["site_id"]
        if site_id not in site_indexes:  # membership on the dict directly
            # Remember where this site lives in the output list.
            site_indexes[site_id] = len(sites)
            sites.append({
                "id": site_id,
                "name": room["site_name"],
                "rooms": []})
        if room["room_id"]:
            # It could be so that the left join returns an empty site
            site_index = site_indexes[site_id]
            sites[site_index]["rooms"].append({
                "id": room["room_id"],
                "name": room["room_name"]})
    return sites


def _serialize_room(room):
    """Convert one rooms row into its JSON-friendly dict."""
    return {"name": room["room_name"], "id": room["room_id"]}


def _serialize_rooms(rooms):
    """Serialize a list of rooms rows."""
    return [_serialize_room(r) for r in rooms]


def _get_site(site_id):
    """Return one serialized site (with rooms); raise SiteNotFound if absent."""
    db_instance = database.get()
    curs = db_instance.execute('SELECT * FROM sites '
                               'LEFT JOIN rooms USING (site_id) '
                               'WHERE site_id = ? ',
                               (site_id,))
    sites = curs.fetchall()
    if not sites:
        raise SiteNotFound
    return _serialize_sites(sites)[0]


def _get_sites():
    """Return all serialized sites with their rooms."""
    db_instance = database.get()
    curs = db_instance.execute('SELECT * FROM sites '
                               'LEFT JOIN rooms USING (site_id) '
                               'ORDER BY site_name DESC')
    rooms_cursor = curs.fetchall()
    return _serialize_sites(rooms_cursor)


def _get_room(room_id):
    """Return one serialized room; raise RoomNotFound if absent."""
    db_instance = database.get()
    curs = db_instance.execute('SELECT * FROM rooms '
                               'WHERE room_id = ?',
                               (room_id,))
    rooms = curs.fetchall()
    if not rooms:
        raise RoomNotFound
    return _serialize_rooms(rooms)[0]


def _get_rooms(site_id):
    """Return all serialized rooms belonging to a site."""
    db_instance = database.get()
    curs = db_instance.execute(
        'SELECT * FROM rooms WHERE site_id = ? '
        'ORDER BY room_name DESC',
        (site_id,)
    )
    rooms = curs.fetchall()
    return _serialize_rooms(rooms)


@app.route('/api/sites', methods=['GET'])
def get_all_sites():
    """Return every site with its rooms."""
    return jsonify(_get_sites())


@app.route('/api/sites/<int:site_id>/rooms/<int:room_id>', methods=['DELETE'])
@session.admin_required
def delete_room(site_id, room_id):
    """Delete an empty room; 404 if unknown, 403 if it still holds books.

    On success returns the deleted room's representation (default 200).
    """
    try:
        response = jsonify(_get_room(room_id))
    except RoomNotFound:
        response = jsonify({'msg': 'Room not found'})
        response.status_code = 404
        return response
    db = database.get()
    books_cursor = db.cursor()
    books_cursor.execute('SELECT * FROM books WHERE room_id = ?', (room_id,))
    books = books_cursor.fetchall()
    if not books:
        books_cursor.execute(
            'DELETE FROM rooms '
            'WHERE room_id = ?',
            (room_id,))
        db.commit()
    else:
        # Refuse to delete a room that still contains books.
        response = jsonify({
            'msg': 'Room currently has books linked to it.\
 Make sure the room is empty before deleting this room'
        })
        response.status_code = 403
    return response


@app.route('/api/sites', methods=['POST'])
@session.admin_required
def add_new_site():
    """Create a site from JSON {'name': ...}; 400 on bad input, 409 on dup."""
    post_data = flask.request.get_json()
    if post_data is None:
        response = jsonify({'msg': 'Missing json data in post request.'})
        response.status_code = 400
        return response
    elif 'name' not in post_data:
        response = jsonify({'msg': 'Missing site name in post request.'})
        response.status_code = 400
        return response
    try:
        db_instance = database.get()
        cursor = db_instance.cursor()
        cursor.execute(
            'INSERT INTO sites '
            '(site_name) VALUES (?)',
            (post_data['name'],)
        )
        db_instance.commit()
        last_id = cursor.lastrowid
        last_added_site = _get_site(last_id)
        response = jsonify(last_added_site)
        response.status_code = 200
    except sqlite3.IntegrityError:
        # site_name uniqueness constraint violated.
        response = jsonify({"msg": "A site with that name already exist"})
        response.status_code = 409
    return response


@app.route('/api/sites/<int:site_id>', methods=['GET'])
def get_site(site_id):
    """Return one site with its rooms; 404 if unknown."""
    try:
        response = jsonify(_get_site(site_id))
    except SiteNotFound:
        response = jsonify({'msg': 'Site not found'})
        response.status_code = 404
        return response
    return response


@app.route('/api/sites/<int:site_id>', methods=['PUT'])
@session.admin_required
def rename_site(site_id):
    """Rename a site from JSON {'name': ...}."""
    put_data = flask.request.get_json()
    if put_data is None:
        response = jsonify({'msg': 'Missing data in put request.'})
        response.status_code = 400
        return response
    if 'name' in put_data:
        db = database.get()
        cursor = db.cursor()
        cursor.execute(
            'UPDATE sites '
            'SET site_name = ? '
            'WHERE site_id = ?',
            (put_data['name'], site_id))
        db.commit()
        response = jsonify(_get_site(site_id))
        response.status_code = 200
    else:
        # Bug fix: previously fell through with `response` unbound and
        # raised UnboundLocalError (HTTP 500) when 'name' was missing.
        response = jsonify({'msg': 'Missing site name in post request.'})
        response.status_code = 400
    return response


@app.route('/api/sites/<int:site_id>/rooms', methods=['GET'])
def get_rooms(site_id):
    """Return the rooms of a site (dead duplicate jsonify call removed)."""
    return jsonify(_get_rooms(site_id))


@app.route('/api/sites/<int:site_id>/rooms', methods=['POST'])
@session.admin_required
def post_new_room(site_id):
    """Create a room in a site; 400 on bad input, 409 on duplicate name."""
    post_data = flask.request.get_json()
    if post_data is None:
        response = jsonify({'msg': 'Missing json data in post request.'})
        response.status_code = 400
        return response
    elif 'name' not in post_data:
        response = jsonify(
            {'msg': 'Expected parameters in post request: name'})
        response.status_code = 400
        return response
    db_instance = database.get()
    existing_room = db_instance.execute(
        'SELECT * FROM rooms '
        'WHERE site_id = ? '
        'AND room_name = ?',
        (site_id, post_data['name'])
    )
    if existing_room.fetchall():
        # Do not allow duplicate room names within a site
        response = jsonify({"msg": 'A name with that name already '
                                   'exists for this site'})
        response.status_code = 409
        return response
    cursor = db_instance.cursor()
    cursor.execute(
        'INSERT INTO rooms '
        '(room_name, site_id) VALUES (?, ?)',
        (post_data['name'], site_id, )
    )
    db_instance.commit()
    last_id = cursor.lastrowid
    last_added_room = _get_room(last_id)
    response = jsonify(last_added_room)
    response.status_code = 200
    return response


@app.route('/api/sites/<int:site_id>/rooms/<int:room_id>', methods=['GET'])
def get_room(site_id, room_id):
    """Return one room; 404 if unknown."""
    try:
        response = jsonify(_get_room(room_id))
    except RoomNotFound:
        response = jsonify({'msg': 'Room not found'})
        response.status_code = 404
        return response
    return response


@app.route('/api/sites/<int:site_id>/rooms/<int:room_id>', methods=['PUT'])
@session.admin_required
def rename_room(site_id, room_id):
    """Rename a room from JSON {'name': ...}."""
    put_data = flask.request.get_json()
    if put_data is None:
        response = jsonify({'msg': 'Missing data in put request.'})
        response.status_code = 400
        return response
    if 'name' in put_data:
        db = database.get()
        cursor = db.cursor()
        cursor.execute(
            'UPDATE rooms '
            'SET room_name = ? '
            'WHERE room_id = ?',
            (put_data['name'], room_id))
        db.commit()
        response = jsonify(_get_room(room_id))
        response.status_code = 200
    else:
        # Bug fix: previously returned an unbound `response`
        # (UnboundLocalError / HTTP 500) when 'name' was missing.
        response = jsonify({'msg': 'Missing room name in put request.'})
        response.status_code = 400
    return response
#
#
# Copyright (c) 2011-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#

from pycalendar.timezone import Timezone

from twext.enterprise.dal.syntax import SQLFragment, Parameter

from twistedcaldav.test.util import TestCase
from twistedcaldav import caldavxml
from twistedcaldav.timezones import TimezoneCache

from txdav.caldav.datastore.index_file import sqlcalendarquery
from txdav.caldav.datastore.query.builder import buildExpression
from txdav.caldav.datastore.query.filter import Filter, FilterBase, TimeRange, \
    PropertyFilter, TextMatch
from txdav.caldav.datastore.query.generator import CalDAVSQLQueryGenerator
from txdav.common.datastore.sql_tables import schema

import datetime
from twistedcaldav.ical import Component


class TestQueryFilter(TestCase):
    # Verifies the SQL produced by CalDAVSQLQueryGenerator for various
    # CalDAV calendar-query filters.  Expected SQL strings below were
    # reconstructed across a chunk boundary — confirm against upstream.

    _objectSchema = schema.CALENDAR_OBJECT

    # Maps queryable filter property names onto schema columns.
    _queryFields = {
        "UID": _objectSchema.UID,
        "TYPE": _objectSchema.ICALENDAR_TYPE,
    }

    def setUp(self):
        super(TestQueryFilter, self).setUp()
        TimezoneCache.create()

    def test_query(self):
        """
        Basic query test - no time range
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[caldavxml.ComponentFilter(
                    **{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
                )],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))

        expression = buildExpression(filter, self._queryFields)
        sql = CalDAVSQLQueryGenerator(expression, self, 1234)
        select, args, usedtimerange = sql.generate()

        self.assertEqual(select.toSQL(), SQLFragment(
            "select distinct RESOURCE_NAME, ICALENDAR_UID, ICALENDAR_TYPE from CALENDAR_OBJECT where CALENDAR_RESOURCE_ID = ? and ICALENDAR_TYPE in (?, ?, ?)",
            [1234, Parameter('arg1', 3)]
        ))
        self.assertEqual(args, {"arg1": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")})
        self.assertEqual(usedtimerange, False)

    def test_query_timerange(self):
        """
        Basic query test - with time range
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[caldavxml.ComponentFilter(
                    *[caldavxml.TimeRange(**{"start": "20060605T160000Z", "end": "20060605T170000Z"})],
                    **{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
                )],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))

        expression = buildExpression(filter, self._queryFields)
        sql = CalDAVSQLQueryGenerator(expression, self, 1234)
        select, args, usedtimerange = sql.generate()

        # The UTC range is tested twice: once against floating events
        # (local-time comparison) and once against fixed events.
        self.assertEqual(select.toSQL(), SQLFragment(
            "select distinct RESOURCE_NAME, ICALENDAR_UID, ICALENDAR_TYPE from CALENDAR_OBJECT, TIME_RANGE where ICALENDAR_TYPE in (?, ?, ?) and (FLOATING = ? and START_DATE < ? and END_DATE > ? or FLOATING = ? and START_DATE < ? and END_DATE > ?) and CALENDAR_OBJECT_RESOURCE_ID = RESOURCE_ID and TIME_RANGE.CALENDAR_RESOURCE_ID = ?",
            [Parameter('arg1', 3), False, datetime.datetime(2006, 6, 5, 17, 0), datetime.datetime(2006, 6, 5, 16, 0), True, datetime.datetime(2006, 6, 5, 13, 0), datetime.datetime(2006, 6, 5, 12, 0), 1234]
        ))
        self.assertEqual(args, {"arg1": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")})
        self.assertEqual(usedtimerange, True)

    def test_query_freebusy(self):
        """
        Basic query test - with time range
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[caldavxml.ComponentFilter(
                    *[caldavxml.TimeRange(**{"start": "20060605T160000Z", "end": "20060605T170000Z"})],
                    **{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
                )],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))

        expression = buildExpression(filter, self._queryFields)
        # Extra args: userid and freebusy flag enable the PERUSER join.
        sql = CalDAVSQLQueryGenerator(expression, self, 1234, "user01", True)
        select, args, usedtimerange = sql.generate()

        self.assertEqual(select.toSQL(), SQLFragment(
            "select distinct RESOURCE_NAME, ICALENDAR_UID, ICALENDAR_TYPE, ORGANIZER, FLOATING, coalesce(ADJUSTED_START_DATE, START_DATE), coalesce(ADJUSTED_END_DATE, END_DATE), FBTYPE, TIME_RANGE.TRANSPARENT, PERUSER.TRANSPARENT from CALENDAR_OBJECT, TIME_RANGE left outer join PERUSER on INSTANCE_ID = TIME_RANGE_INSTANCE_ID and USER_ID = ? where ICALENDAR_TYPE in (?, ?, ?) and (FLOATING = ? and coalesce(ADJUSTED_START_DATE, START_DATE) < ? and coalesce(ADJUSTED_END_DATE, END_DATE) > ? or FLOATING = ? and coalesce(ADJUSTED_START_DATE, START_DATE) < ? and coalesce(ADJUSTED_END_DATE, END_DATE) > ?) and CALENDAR_OBJECT_RESOURCE_ID = RESOURCE_ID and TIME_RANGE.CALENDAR_RESOURCE_ID = ?",
            ['user01', Parameter('arg1', 3), False, datetime.datetime(2006, 6, 5, 17, 0), datetime.datetime(2006, 6, 5, 16, 0), True, datetime.datetime(2006, 6, 5, 13, 0), datetime.datetime(2006, 6, 5, 12, 0), 1234]
        ))
        self.assertEqual(args, {"arg1": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")})
        self.assertEqual(usedtimerange, True)

    def test_query_not_extended(self):
        """
        Query test - two terms not anyof
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[
                    caldavxml.ComponentFilter(
                        **{"name": ("VEVENT")}
                    ),
                    caldavxml.ComponentFilter(
                        **{"name": ("VTODO")}
                    ),
                ],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))

        expression = buildExpression(filter, self._queryFields)
        sql = CalDAVSQLQueryGenerator(expression, self, 1234)
        select, args, usedtimerange = sql.generate()

        # Without test="anyof" the terms are ANDed (an impossible match here).
        self.assertEqual(select.toSQL(), SQLFragment(
            "select distinct RESOURCE_NAME, ICALENDAR_UID, ICALENDAR_TYPE from CALENDAR_OBJECT where CALENDAR_RESOURCE_ID = ? and ICALENDAR_TYPE = ? and ICALENDAR_TYPE = ?",
            [1234, "VEVENT", "VTODO"]
        ))
        self.assertEqual(args, {})
        self.assertEqual(usedtimerange, False)

    def test_query_extended(self):
        """
        Extended query test - two terms with anyof
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[
                    caldavxml.ComponentFilter(
                        *[caldavxml.TimeRange(**{"start": "20060605T160000Z", })],
                        **{"name": ("VEVENT")}
                    ),
                    caldavxml.ComponentFilter(
                        **{"name": ("VTODO")}
                    ),
                ],
                **{"name": "VCALENDAR", "test": "anyof"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))

        expression = buildExpression(filter, self._queryFields)
        sql = CalDAVSQLQueryGenerator(expression, self, 1234)
        select, args, usedtimerange = sql.generate()

        # With anyof the terms are ORed; the open-ended time range only
        # constrains END_DATE.
        self.assertEqual(select.toSQL(), SQLFragment(
            "select distinct RESOURCE_NAME, ICALENDAR_UID, ICALENDAR_TYPE from CALENDAR_OBJECT, TIME_RANGE where (ICALENDAR_TYPE = ? and (FLOATING = ? and END_DATE > ? or FLOATING = ? and END_DATE > ?) or ICALENDAR_TYPE = ?) and CALENDAR_OBJECT_RESOURCE_ID = RESOURCE_ID and TIME_RANGE.CALENDAR_RESOURCE_ID = ?",
            ['VEVENT', False, datetime.datetime(2006, 6, 5, 16, 0), True, datetime.datetime(2006, 6, 5, 12, 0), 'VTODO', 1234]
        ))
        self.assertEqual(args, {})
        self.assertEqual(usedtimerange, True)

    # NOTE(review): "sqllite" is a long-standing typo in this test name;
    # renaming would change test discovery, so it is kept as-is.
    def test_sqllite_query(self):
        """
        Basic query test - single term. Only UID can be queried via sql.
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[caldavxml.ComponentFilter(
                    **{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
                )],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)

        sql, args = sqlcalendarquery(filter, 1234)
        self.assertTrue(sql.find("RESOURCE") != -1)
        self.assertTrue(sql.find("TIMESPAN") == -1)
        self.assertTrue(sql.find("PERUSER") == -1)
        self.assertTrue("VEVENT" in args)


class TestQueryFilterSerialize(TestCase):
    # Round-trips Filter objects through serialize()/deserialize().

    def setUp(self):
        super(TestQueryFilterSerialize, self).setUp()
        TimezoneCache.create()

    def test_query(self):
        """
        Basic query test - no time range
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[caldavxml.ComponentFilter(
                    **{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
                )],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))
        j = filter.serialize()
        self.assertEqual(j["type"], "Filter")

        f = FilterBase.deserialize(j)
        self.assertTrue(isinstance(f, Filter))

    def test_timerange_query(self):
        """
        Basic query test with time range
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[caldavxml.ComponentFilter(
                    *[caldavxml.TimeRange(**{"start": "20060605T160000Z", "end": "20060605T170000Z"})],
                    **{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
                )],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))
        j = filter.serialize()
        self.assertEqual(j["type"], "Filter")

        f = FilterBase.deserialize(j)
        self.assertTrue(isinstance(f, Filter))
        # The timezone attached via settzinfo must survive the round trip.
        self.assertTrue(isinstance(f.child.filters[0].qualifier, TimeRange))
        self.assertTrue(isinstance(f.child.filters[0].qualifier.tzinfo, Timezone))
        self.assertEqual(f.child.filters[0].qualifier.tzinfo.getTimezoneID(), "America/New_York")

    def test_query_not_extended(self):
        """
        Basic query test with time range
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[
                    caldavxml.ComponentFilter(
                        **{"name": ("VEVENT")}
                    ),
                    caldavxml.ComponentFilter(
                        **{"name": ("VTODO")}
                    ),
                ],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))
        j = filter.serialize()
        self.assertEqual(j["type"], "Filter")

        f = FilterBase.deserialize(j)
        self.assertTrue(isinstance(f, Filter))
        self.assertEqual(len(f.child.filters), 2)

    def test_query_extended(self):
        """
        Basic query test with time range
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[
                    caldavxml.ComponentFilter(
                        *[caldavxml.TimeRange(**{"start": "20060605T160000Z", })],
                        **{"name": ("VEVENT")}
                    ),
                    caldavxml.ComponentFilter(
                        **{"name": ("VTODO")}
                    ),
                ],
                **{"name": "VCALENDAR", "test": "anyof"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))
        j = filter.serialize()
        self.assertEqual(j["type"], "Filter")

        f = FilterBase.deserialize(j)
        self.assertTrue(isinstance(f, Filter))
        self.assertEqual(len(f.child.filters), 2)
        self.assertTrue(isinstance(f.child.filters[0].qualifier, TimeRange))

    def test_query_text(self):
        """
        Basic query test with time range
        """
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[
                    caldavxml.ComponentFilter(
                        caldavxml.PropertyFilter(
                            caldavxml.TextMatch.fromString("1234", False),
                            name="UID",
                        ),
                        **{"name": ("VEVENT")}
                    ),
                ],
                **{"name": "VCALENDAR", "test": "anyof"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))
        j = filter.serialize()
        self.assertEqual(j["type"], "Filter")

        f = FilterBase.deserialize(j)
        self.assertTrue(isinstance(f, Filter))
        self.assertTrue(isinstance(f.child.filters[0].filters[0], PropertyFilter))
        self.assertTrue(isinstance(f.child.filters[0].filters[0].qualifier, TextMatch))
        self.assertEqual(f.child.filters[0].filters[0].qualifier.text, "1234")


class TestQueryFilterMatch(TestCase):
    # Evaluates Filter.match() directly against an iCalendar component.
    # NOTE(review): "vlarm" in the test name below is a typo for "valarm";
    # kept to avoid changing test discovery.

    def setUp(self):
        super(TestQueryFilterMatch, self).setUp()
        TimezoneCache.create()

    def test_vlarm_undefined(self):
        # Filter requires VALARM to be absent; the event HAS a VALARM,
        # so the filter must not match.
        filter = caldavxml.Filter(
            caldavxml.ComponentFilter(
                *[caldavxml.ComponentFilter(
                    *[caldavxml.ComponentFilter(
                        caldavxml.IsNotDefined(),
                        **{"name": "VALARM"}
                    )],
                    **{"name": "VEVENT"}
                )],
                **{"name": "VCALENDAR"}
            )
        )
        filter = Filter(filter)
        filter.child.settzinfo(Timezone(tzid="America/New_York"))

        self.assertFalse(filter.match(
            Component.fromString("""BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VTIMEZONE
LAST-MODIFIED:20040110T032845Z
TZID:US/Eastern
BEGIN:DAYLIGHT
DTSTART:20000404T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20001026T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTAMP:20051222T210412Z
CREATED:20060102T150000Z
DTSTART;TZID=US/Eastern:20130102T100000
DURATION:PT1H
RRULE:FREQ=DAILY;COUNT=5
SUMMARY:event 5
UID:945113826375CBB89184DC36@ninevah.local
CATEGORIES:cool,hot
CATEGORIES:warm
BEGIN:VALARM
ACTION:AUDIO
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:VEVENT
END:VCALENDAR
""")))
# Multimodal (audio + skeletal) CTC sequence-labelling trainer.
# NOTE(review): this is Python 2 code (print statements below) using the
# old Keras 1.x API (`Merge` layer, `input=`/`output=` Model kwargs).
import random
import time
import itertools
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, LSTM, Input, Lambda, TimeDistributed, Merge
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers.wrappers import Bidirectional
from keras import backend as K
from keras.optimizers import RMSprop, Adam, Nadam
import keras.callbacks
from keras.models import load_model
from keras.models import model_from_json
from keras.layers.noise import GaussianNoise
from keras.regularizers import l1,l2
from keras.constraints import maxnorm
from keras import layers
from keras.initializers import RandomUniform

from data_generator import DataGenerator
from losses import ctc_lambda_func


def layer_trainable(l, freeze, verbose=False, bidir_fix=True):
    """
    Freeze the Bidirectional Layers

    The Bidirectional wrapper is buggy and does not support freezing.
    This is a workaround that freezes each bidirectional layer.

    NOTE(review): with freeze=True this sets the wrapper's `trainable`
    to True while setting the inner forward/backward layers to not
    trainable.  That may be the intended workaround for the wrapper bug,
    but `l.trainable = not freeze` looks like what the name implies —
    confirm against the Keras version in use.
    """
    l.trainable = freeze
    if bidir_fix:
        if type(l) == Bidirectional:
            # Freeze/unfreeze the wrapped directions individually.
            l.backward_layer.trainable = not freeze
            l.forward_layer.trainable = not freeze
    if verbose:
        if freeze:
            action='Froze'
        else :
            action='Unfroze'
        print("{} {}".format(action, l.name))


def build_model(maxlen, numfeats_speech, numfeats_skeletal, nb_classes, lab_seq_len):
    """
    Build the fused audio+skeletal BLSTM-CTC model.

    Loads two pretrained single-modality networks (JSON + weights),
    reuses their BLSTM layers (frozen), concatenates the two residual
    streams, and adds a trainable BLSTM + softmax head wired into a
    CTC loss Lambda.  Returns the compiled training model.
    """
    K.set_learning_phase(1)
    skeletal_model_file = '../skeletal_network/sk_ctc_lstm_model.json'
    skeletal_weights = '../skeletal_network/sk_ctc_lstm_weights_best.h5'

    speech_model_file = '../audio_network/sp_ctc_lstm_model.json'
    speech_weights = '../audio_network/sp_ctc_lstm_weights_best.h5'

    # Restore the pretrained skeletal network.
    json_file = open(skeletal_model_file, 'r')
    skeletal_model_json = json_file.read()
    json_file.close()
    skeletal_model = model_from_json(skeletal_model_json)
    skeletal_model.load_weights(skeletal_weights)

    # Restore the pretrained speech network.
    json_file = open(speech_model_file, 'r')
    speech_model_json = json_file.read()
    json_file.close()
    speech_model = model_from_json(speech_model_json)
    speech_model.load_weights(speech_weights)

    uni_initializer = RandomUniform(minval=-0.05, maxval=0.05, seed=47)

    input_shape_a = (maxlen, numfeats_speech)
    input_shape_s = (maxlen, numfeats_skeletal)

    input_data_a = Input(name='the_input_audio', shape=input_shape_a, dtype='float32')
    input_data_s = Input(name='the_input_skeletal', shape=input_shape_s, dtype='float32')

    # Noise regularization on the audio stream only (skeletal stddev is 0).
    input_noise_a = GaussianNoise(stddev=0.5, name='gaussian_noise_a')(input_data_a)
    input_noise_s = GaussianNoise(stddev=0.0, name='gaussian_noise_s')(input_data_s)

    # Reuse pretrained BLSTM stacks (layers[2]/[3] of each loaded model)
    # and add a residual connection over each stack.
    blstm_1_a = speech_model.layers[2](input_noise_a)
    blstm_2_a = speech_model.layers[3](blstm_1_a)
    res_a_1 = layers.add([blstm_1_a, blstm_2_a], name='speech_residual')

    blstm_1_s = skeletal_model.layers[2](input_noise_s)
    blstm_2_s = skeletal_model.layers[3](blstm_1_s)
    res_s_1 = layers.add([blstm_1_s, blstm_2_s], name='skeletal_residual')

    model_a = Model(input=[input_data_a], output=res_a_1)
    model_a.layers[2].name='speech_blstm_1'
    model_a.layers[3].name='speech_blstm_2'

    model_s = Model(input=[input_data_s], output=res_s_1)
    model_s.layers[2].name='skeletal_blstm_1'
    model_s.layers[3].name='skeletal_blstm_2'

    # attempt to freeze all Bidirectional layers.
    # Bidirectional wrapper layer is buggy so we need to freeze the weights this way.
    frozen_types = [Bidirectional]

    # Go through layers for both networks and freeze the weights of Bidirectional layers.
    for l_a,l_s in zip(model_a.layers,model_s.layers):
        if len(l_a.trainable_weights):
            if type(l_a) in frozen_types:
                layer_trainable(l_a, freeze=True, verbose=True)
        if len(l_s.trainable_weights):
            if type(l_s) in frozen_types:
                layer_trainable(l_s, freeze=True, verbose=True)

    model_a.summary()
    model_s.summary()

    # Keras 1.x-style concat of the two modality streams.
    merged = Merge([model_a, model_s], mode='concat')([res_a_1,res_s_1])

    # Trainable fusion BLSTM on top of the frozen pretrained stacks.
    lstm_3 = Bidirectional(LSTM(100, name='blstm_2',
                                activation='tanh',
                                recurrent_activation='hard_sigmoid',
                                recurrent_dropout=0.0,
                                dropout=0.5,
                                kernel_constraint=maxnorm(3),
                                kernel_initializer=uni_initializer,
                                return_sequences=True),
                           merge_mode='concat')(merged)
    dropout_3 = Dropout(0.5, name='dropout_layer_3')(lstm_3)

    inner = Dense(nb_classes, name='dense_1',
                  kernel_initializer=uni_initializer)(dropout_3)
    y_pred = Activation('softmax', name='softmax')(inner)

    Model(input=[input_data_a,input_data_s], output=y_pred).summary()

    # Extra inputs consumed only by the CTC loss computation.
    labels = Input(name='the_labels', shape=[lab_seq_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    loss_out = Lambda(ctc_lambda_func, output_shape=(1,),
                      name="ctc")([y_pred, labels, input_length, label_length])

    model = Model(input=[input_data_a,input_data_s, labels, input_length, label_length],
                  output=[loss_out])

    adam = Adam(lr=0.0001, clipvalue=0.5, decay=1e-5)
    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)

    return model


if __name__ == '__main__':
    # Training hyper-parameters.
    minibatch_size = 2
    val_split = 0.2
    maxlen = 1900
    nb_classes = 22
    nb_epoch = 500
    numfeats_speech = 39
    numfeats_skeletal = 20

    dataset='train'

    data_gen = DataGenerator(minibatch_size=minibatch_size,
                             numfeats_skeletal=numfeats_skeletal,
                             numfeats_speech=numfeats_speech,
                             maxlen=maxlen,
                             dataset=dataset,
                             val_split=val_split,
                             nb_classes=nb_classes)

    lab_seq_len = data_gen.absolute_max_sequence_len

    model = build_model(maxlen, numfeats_speech, numfeats_skeletal,
                        nb_classes, lab_seq_len)

    # NOTE(review): earlystopping is created but never added to the
    # callbacks list below — presumably an oversight; confirm.
    earlystopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)

    # Checkpoint only the best weights by validation loss.
    filepath="multimodal_ctc_lstm_weights_best.h5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 mode='auto')

    print 'Start training.'
    start_time = time.time()

    # data_gen doubles as a Keras callback (end-of-epoch bookkeeping).
    model.fit_generator(generator=data_gen.next_train(),
                        steps_per_epoch=(data_gen.get_size(train=True)/minibatch_size),
                        epochs=nb_epoch,
                        validation_data=data_gen.next_val(),
                        validation_steps=(data_gen.get_size(train=False)/minibatch_size),
                        callbacks=[checkpoint, data_gen])

    end_time = time.time()
    print "--- Training time: %s seconds ---" % (end_time - start_time)
''' Created on Jul 10, 2017 @author: prkrj ''' import pytest import copy import HNFGen as hg def read_file(list_file): file = open(list_file, 'r') i = 0 matrix = [] final_list = [] for line in file: row = line.split() if row != []: new_row = [] for value in row: new_value = int(value) new_row.append(new_value) matrix.append(new_row) i += 1 if i == 3: final_list.append(matrix) matrix = [] i = 0 file.close() return final_list def diff(list1, list2): same = False difference = [] for item1 in list1: same = False for item2 in list2: if item1 == item2: same = True break if same == False: difference.append(copy.deepcopy(item1)) print ("Difference:") print (difference) return def test_body_ortho(): correct = read_file("test/output/body_ortho1_10") test_output = hg.body_ortho_1(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/body_ortho1_100") test_output = hg.body_ortho_1(100) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/body_ortho2_10") test_output = hg.body_ortho_2(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/body_ortho3_10") test_output = hg.body_ortho_3(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/body_ortho3_100") test_output = hg.body_ortho_3(100) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/body_ortho4_10") test_output = hg.body_ortho_4(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/body_ortho4_100") test_output = hg.body_ortho_4(100) assert sorted(test_output) == sorted(correct) print ('Finshed Body Ortho') def test_base_mono(): correct = read_file("test/output/base_mono1_10") test_output = hg.base_mono_1(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_mono1_100") test_output = hg.base_mono_1(100) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_mono2_10") test_output = 
hg.base_mono_2(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_mono3_10") test_output = hg.base_mono_3(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_mono3_100") test_output = hg.base_mono_3(100) assert sorted(test_output) == sorted(correct) print ('Finished Base Mono') def test_base_ortho(): correct = read_file("test/output/base_ortho1_10") test_output = hg.base_ortho_1(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_ortho1_100") test_output = hg.base_ortho_1(100) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_ortho2_10") test_output = hg.base_ortho_2(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_ortho2_100") test_output = hg.base_ortho_2(100) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_ortho3_10") test_output = hg.base_ortho_3(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_ortho3_100") test_output = hg.base_ortho_3(100) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_ortho3_10") test_output = hg.base_ortho_3(10) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/base_ortho3_100") test_output = hg.base_ortho_3(100) assert sorted(test_output) == sorted(correct) def test_body_cubic(): correct = read_file("test/output/body_cubic1_8") test_output = hg.body_cubic_1(8) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/body_cubic1_108") test_output = hg.body_cubic_1(108) assert sorted(test_output) == sorted(correct) assert hg.body_cubic_1(10) == [] correct = read_file("test/output/body_cubic2_8") test_output = hg.body_cubic_2(8) assert sorted(test_output) == sorted(correct) correct = read_file("test/output/body_cubic2_108") test_output = hg.body_cubic_2(108) assert sorted(test_output) == sorted(correct) 
# Fixture-driven tests for the `hg` geometry generators.
# NOTE(review): `hg` and `read_file` are provided elsewhere in this test
# module; fixtures live under test/output/. The repeated
# read-fixture/run/compare pattern is factored into one helper, and the
# stray debug prints that had been left in test_trig are removed.

def _assert_matches_fixture(func, arg, fixture):
    """Assert func(arg) equals the saved fixture, ignoring element order."""
    correct = read_file("test/output/" + fixture)
    test_output = func(arg)
    assert sorted(test_output) == sorted(correct)


def test_body_cubic():
    # NOTE(review): the opening assertions of this test (body_cubic_1 and the
    # body_cubic_2 fixture checks) sit above this chunk; only the part visible
    # here is reproduced.
    assert hg.body_cubic_2(10) == []
    for i in range(3, 9):
        func = getattr(hg, "body_cubic_%d" % i)
        for arg in (8, 108):
            _assert_matches_fixture(func, arg, "body_cubic%d_%d" % (i, arg))
        # 10 is not a valid cubic-body argument: result must be empty.
        assert func(10) == []


def test_body_tet():
    for i in range(1, 6):
        func = getattr(hg, "body_tet_%d" % i)
        for arg in (10, 100):
            _assert_matches_fixture(func, arg, "body_tet%d_%d" % (i, arg))


def test_face_ortho():
    for i in range(1, 11):
        func = getattr(hg, "face_ortho_%d" % i)
        for arg in (10, 100):
            _assert_matches_fixture(func, arg, "face_ortho%d_%d" % (i, arg))


def test_s_cubic():
    for i in range(1, 6):
        func = getattr(hg, "s_cubic_%d" % i)
        for arg in (8, 108):
            _assert_matches_fixture(func, arg, "s_cubic%d_%d" % (i, arg))
        # 10 is not a valid cubic-surface argument: result must be empty.
        assert func(10) == []


def test_trig():
    for i in range(1, 5):
        func = getattr(hg, "trig_%d" % i)
        for arg in (10, 100):
            _assert_matches_fixture(func, arg, "trig%d_%d" % (i, arg))
import inspect
from collections import OrderedDict
from datetime import datetime
from decimal import Decimal

from django.conf import settings
from django.contrib.contenttypes.fields import GenericRel
from django.db import models
from django.db.models import FieldDoesNotExist, ManyToManyRel, ManyToOneRel
from django.utils.timezone import get_current_timezone

from .ast import Comparison, Const, List, Logical, Name, Node
from .compat import text_type
from .exceptions import DjangoQLSchemaError


class DjangoQLField(object):
    """
    Abstract searchable field
    """
    model = None
    name = None
    nullable = False
    suggest_options = False
    type = 'unknown'
    value_types = []
    value_types_description = ''

    def __init__(self, model=None, name=None, nullable=None,
                 suggest_options=None):
        # Only override class-level defaults for explicitly passed values,
        # so subclasses may pre-define any of these as class attributes.
        if model is not None:
            self.model = model
        if name is not None:
            self.name = name
        if nullable is not None:
            self.nullable = nullable
        if suggest_options is not None:
            self.suggest_options = suggest_options

    def as_dict(self):
        """Serialize this field's description for the client-side schema."""
        return {
            'type': self.type,
            'nullable': self.nullable,
            'options': list(self.get_options()) if self.suggest_options else [],
        }

    def _field_choices(self):
        # Return the Django field's declared `choices`, or [] when the model
        # is unknown or the field doesn't exist on it.
        if self.model:
            try:
                return self.model._meta.get_field(self.name).choices
            except FieldDoesNotExist:
                pass
        return []

    def get_options(self):
        """
        Override this method to provide custom suggestion options
        """
        choices = self._field_choices()
        if choices:
            # Suggest the human-readable labels of declared choices.
            return [c[1] for c in choices]
        else:
            # No declared choices: suggest values actually stored in the DB.
            return self.model.objects.\
                order_by(self.name).\
                values_list(self.name, flat=True)

    def get_lookup_name(self):
        """
        Override this method to provide custom lookup name
        """
        return self.name

    def get_lookup_value(self, value):
        """
        Override this method to convert displayed values to lookup values
        """
        choices = self._field_choices()
        if choices:
            # Map displayed labels back to stored DB values.
            if isinstance(value, list):
                return [c[0] for c in choices if c[1] in value]
            else:
                for c in choices:
                    if c[1] == value:
                        return c[0]
        return value

    def get_operator(self, operator):
        """
        Get a comparison suffix to be used in Django ORM & inversion flag
        for it

        :param operator: string, DjangoQL comparison operator
        :return: (suffix, invert) - a tuple with 2 values:
            suffix - suffix to be used in ORM query, e.g. '__gt' for '>'
            invert - boolean, True if this comparison needs to be inverted
        """
        op = {
            '=': '',
            '>': '__gt',
            '>=': '__gte',
            '<': '__lt',
            '<=': '__lte',
            '~': '__icontains',
            'in': '__in',
        }.get(operator)
        if op is not None:
            return op, False
        # Negated operators share suffixes with their positive counterparts;
        # the caller applies ~Q() to invert the result.
        op = {
            '!=': '',
            '!~': '__icontains',
            'not in': '__in',
        }[operator]
        return op, True

    def get_lookup(self, path, operator, value):
        """
        Performs a lookup for this field with given path, operator and value.

        Override this if you'd like to implement a fully custom lookup. It
        should support all comparison operators compatible with the field
        type.

        :param path: list of names preceding current lookup. For example,
            for 'author.groups.name = "Foo"' path would be
            ['author', 'groups']; 'name' is the current field instance.
        :param operator: one of '=', '!=', '>', '>=', '<', '<=', '~', '!~',
            'in', 'not in'. '~'/'!~' apply to StrField only; BoolField
            excludes the ordering operators.
        :param value: value passed for comparison
        :return: Q-object
        """
        search = '__'.join(path + [self.get_lookup_name()])
        op, invert = self.get_operator(operator)
        q = models.Q(**{'%s%s' % (search, op): self.get_lookup_value(value)})
        return ~q if invert else q

    def validate(self, value):
        """Raise DjangoQLSchemaError if value can't be compared to field."""
        if not self.nullable and value is None:
            raise DjangoQLSchemaError(
                'Field %s is not nullable, '
                'can\'t compare it to None' % self.name
            )
        if value is not None and type(value) not in self.value_types:
            if self.nullable:
                msg = (
                    'Field "{field}" has "nullable {field_type}" type. '
                    'It can be compared to {possible_values} or None, '
                    'but not to {value}'
                )
            else:
                msg = (
                    'Field "{field}" has "{field_type}" type. It can '
                    'be compared to {possible_values}, '
                    'but not to {value}'
                )
            raise DjangoQLSchemaError(msg.format(
                field=self.name,
                field_type=self.type,
                possible_values=self.value_types_description,
                value=repr(value),
            ))


class IntField(DjangoQLField):
    type = 'int'
    value_types = [int]
    value_types_description = 'integer numbers'


class FloatField(DjangoQLField):
    type = 'float'
    value_types = [int, float, Decimal]
    value_types_description = 'floating point numbers'


class StrField(DjangoQLField):
    type = 'str'
    value_types = [text_type]
    value_types_description = 'strings'


class BoolField(DjangoQLField):
    type = 'bool'
    value_types = [bool]
    value_types_description = 'True or False'


class DateField(DjangoQLField):
    type = 'date'
    value_types = [text_type]
    value_types_description = 'dates in "YYYY-MM-DD" format'

    def validate(self, value):
        super(DateField, self).validate(value)
        try:
            self.get_lookup_value(value)
        except ValueError:
            raise DjangoQLSchemaError(
                'Field "%s" can be compared to dates in '
                '"YYYY-MM-DD" format, but not to %s' % (
                    self.name,
                    repr(value),
                )
            )

    def get_lookup_value(self, value):
        return datetime.strptime(value, '%Y-%m-%d').date()


class DateTimeField(DjangoQLField):
    type = 'datetime'
    value_types = [text_type]
    value_types_description = 'timestamps in "YYYY-MM-DD HH:MM" format'

    def validate(self, value):
        super(DateTimeField, self).validate(value)
        try:
            self.get_lookup_value(value)
        except ValueError:
            raise DjangoQLSchemaError(
                'Field "%s" can be compared to timestamps in '
                '"YYYY-MM-DD HH:MM" format, but not to %s' % (
                    self.name,
                    repr(value),
                )
            )

    def get_lookup_value(self, value):
        # Accept progressively more precise timestamps:
        # "YYYY-MM-DD", "YYYY-MM-DD HH:MM", "YYYY-MM-DD HH:MM:SS".
        mask = '%Y-%m-%d'
        if len(value) > 10:
            mask += ' %H:%M'
        if len(value) > 16:
            mask += ':%S'
        dt = datetime.strptime(value, mask)
        if settings.USE_TZ:
            dt = dt.replace(tzinfo=get_current_timezone())
        return dt

    def get_lookup(self, path, operator, value):
        search = '__'.join(path + [self.get_lookup_name()])
        op, invert = self.get_operator(operator)
        # Add LIKE operator support for datetime fields. For LIKE comparisons
        # we don't want to convert source value to datetime instance, because
        # it would effectively kill the idea. What we want is expressions like
        #   'created ~ "2017-01-30'
        # to be translated to
        #   'created LIKE %2017-01-30%',
        # but that works only if we pass a string as a parameter. If we pass
        # a datetime instance, it would add a time part of 00:00:00 and the
        # resulting comparison would look like
        #   'created LIKE %2017-01-30 00:00:00%'
        # which is not what we want for this case.
        val = value if operator in ('~', '!~') else self.get_lookup_value(value)
        q = models.Q(**{'%s%s' % (search, op): val})
        return ~q if invert else q


class RelationField(DjangoQLField):
    type = 'relation'

    def __init__(self, model, name, related_model, nullable=False,
                 suggest_options=False):
        super(RelationField, self).__init__(
            model=model,
            name=name,
            nullable=nullable,
            suggest_options=suggest_options,
        )
        self.related_model = related_model

    @property
    def relation(self):
        # Label ("app_label.modelname") of the model this field points to.
        return DjangoQLSchema.model_label(self.related_model)

    def as_dict(self):
        dikt = super(RelationField, self).as_dict()
        dikt['relation'] = self.relation
        return dikt


class DjangoQLSchema(object):
    include = ()  # models to include into introspection
    exclude = ()  # models to exclude from introspection
    suggest_options = None

    def __init__(self, model):
        if not inspect.isclass(model) or not issubclass(model, models.Model):
            raise DjangoQLSchemaError(
                'Schema must be initialized with a subclass of Django model'
            )
        if self.include and self.exclude:
            raise DjangoQLSchemaError(
                'Either include or exclude can be specified, but not both'
            )
        if self.excluded(model):
            raise DjangoQLSchemaError(
                "%s can't be used with %s because it's excluded from it" % (
                    model,
                    self.__class__,
                )
            )
        self.current_model = model
        self._models = None  # lazy cache for the `models` property
        if self.suggest_options is None:
            self.suggest_options = {}

    def excluded(self, model):
        return model in self.exclude or \
            (self.include and model not in self.include)

    @property
    def models(self):
        # Introspect lazily, once; None is the "not computed yet" sentinel
        # (clearer than truthiness, since introspect() never returns {}).
        if self._models is None:
            self._models = self.introspect(
                model=self.current_model,
                exclude=tuple(self.model_label(m) for m in self.exclude),
            )
        return self._models

    @classmethod
    def model_label(cls, model):
        # Fix: a classmethod receives the class as its first argument; it was
        # misleadingly named `self` before.
        return text_type(model._meta)

    def introspect(self, model, exclude=()):
        """
        Start with given model and recursively walk through its
        relationships.

        Returns a dict with all model labels and their fields found.
        """
        fields = OrderedDict()
        result = {self.model_label(model): fields}
        for field in self.get_fields(model):
            if not isinstance(field, DjangoQLField):
                field = self.get_field_instance(model, field)
            if not field:
                continue
            fields[field.name] = field
            if isinstance(field, RelationField) \
                    and field.relation not in exclude:
                # Recurse into related models, excluding ones already seen
                # to avoid infinite loops on cyclic relations.
                result.update(self.introspect(
                    model=field.related_model,
                    exclude=tuple(exclude) + tuple(result.keys()),
                ))
        return result

    def get_fields(self, model):
        """
        By default, returns all field names of a given model.

        Override this method to limit field options. You can either return a
        plain list of field names from it, like ['id', 'name'], or call
        .super() and exclude unwanted fields from its result.
        """
        return sorted(
            [f.name for f in model._meta.get_fields() if f.name != 'password']
        )

    def get_field_instance(self, model, field_name):
        field = model._meta.get_field(field_name)
        field_kwargs = {'model': model, 'name': field.name}
        if field.is_relation:
            if not field.related_model:
                # GenericForeignKey
                return
            if self.excluded(field.related_model):
                return
            field_cls = RelationField
            field_kwargs['related_model'] = field.related_model
        else:
            field_cls = self.get_field_cls(field)
        if isinstance(field, (ManyToOneRel, ManyToManyRel, GenericRel)):
            # Django 1.8 doesn't have .null attribute for these fields
            field_kwargs['nullable'] = True
        else:
            field_kwargs['nullable'] = field.null
        field_kwargs['suggest_options'] = (
            field.name in self.suggest_options.get(model, [])
        )
        field_instance = field_cls(**field_kwargs)
        # Check if suggested options conflict with field type
        if field_cls != StrField and field_instance.suggest_options:
            for option in field_instance.get_options():
                if isinstance(option, text_type):
                    # Convert to StrField; one string option is enough to
                    # decide, so stop scanning (previously the loop kept
                    # re-instantiating StrField for every remaining option).
                    field_instance = StrField(**field_kwargs)
                    break
        return field_instance

    def get_field_cls(self, field):
        """Map a Django model field to its DjangoQL field class."""
        str_fields = (models.CharField, models.TextField, models.UUIDField)
        if isinstance(field, str_fields):
            return StrField
        elif isinstance(field, (models.AutoField, models.IntegerField)):
            return IntField
        elif isinstance(field, (models.BooleanField, models.NullBooleanField)):
            return BoolField
        elif isinstance(field, (models.DecimalField, models.FloatField)):
            return FloatField
        elif isinstance(field, models.DateTimeField):
            # Must be checked before DateField: DateTimeField subclasses it.
            return DateTimeField
        elif isinstance(field, models.DateField):
            return DateField
        return DjangoQLField

    def as_dict(self):
        models = {}
        for model_label, fields in self.models.items():
            models[model_label] = OrderedDict(
                [(name, field.as_dict()) for name, field in fields.items()]
            )
        return {
            'current_model': self.model_label(self.current_model),
            'models': models,
        }

    def resolve_name(self, name):
        """Follow a dotted Name through relations; return the final field,
        or None when the name stops at a relation itself."""
        assert isinstance(name, Name)
        model = self.model_label(self.current_model)
        field = None
        for name_part in name.parts:
            field = self.models[model].get(name_part)
            if not field:
                raise DjangoQLSchemaError(
                    'Unknown field: %s. Possible choices are: %s' % (
                        name_part,
                        ', '.join(sorted(self.models[model].keys())),
                    )
                )
            if field.type == 'relation':
                model = field.relation
                field = None
        return field

    def validate(self, node):
        """
        Validate DjangoQL AST tree vs. current schema
        """
        assert isinstance(node, Node)
        if isinstance(node.operator, Logical):
            self.validate(node.left)
            self.validate(node.right)
            return
        assert isinstance(node.left, Name)
        assert isinstance(node.operator, Comparison)
        assert isinstance(node.right, (Const, List))

        # Check that field and value types are compatible
        field = self.resolve_name(node.left)
        value = node.right.value
        if field is None:
            # Name resolved to a relation, not a concrete field: only a
            # None-comparison ("relation = None") makes sense.
            if value is not None:
                raise DjangoQLSchemaError(
                    'Related model %s can be compared to None only, but not to '
                    '%s' % (node.left.value, type(value).__name__)
                )
        else:
            values = value if isinstance(node.right, List) else [value]
            for v in values:
                field.validate(v)
# coding: utf-8

"""
Copyright 2015 SmartBear Software

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Ref: https://github.com/swagger-api/swagger-codegen
"""

from pprint import pformat


class V1ObjectMeta(object):
    """
    Kubernetes ObjectMeta model (originally auto generated by the swagger
    code generator program).

    Hand-tuned relative to the generated code:
    - the third-party `six.iteritems` dependency is replaced by plain
      dict iteration (equivalent on both Python 2 and 3);
    - `__eq__` no longer raises AttributeError when compared against an
      object of a different type.
    """

    def __init__(self):
        """
        V1ObjectMeta - a model defined in Swagger.

        Declares the swagger type map, the JSON attribute-name map, and the
        backing storage (all None) for every property.
        """
        # Attribute name -> swagger type of its value.
        self.swagger_types = {
            'name': 'str',
            'generate_name': 'str',
            'namespace': 'str',
            'self_link': 'str',
            'uid': 'str',
            'resource_version': 'str',
            'generation': 'int',
            'creation_timestamp': 'str',
            'deletion_timestamp': 'str',
            'deletion_grace_period_seconds': 'int',
            'labels': 'str',
            'annotations': 'str'
        }
        # Attribute name -> key used in the JSON definition.
        self.attribute_map = {
            'name': 'name',
            'generate_name': 'generateName',
            'namespace': 'namespace',
            'self_link': 'selfLink',
            'uid': 'uid',
            'resource_version': 'resourceVersion',
            'generation': 'generation',
            'creation_timestamp': 'creationTimestamp',
            'deletion_timestamp': 'deletionTimestamp',
            'deletion_grace_period_seconds': 'deletionGracePeriodSeconds',
            'labels': 'labels',
            'annotations': 'annotations'
        }
        self._name = None
        self._generate_name = None
        self._namespace = None
        self._self_link = None
        self._uid = None
        self._resource_version = None
        self._generation = None
        self._creation_timestamp = None
        self._deletion_timestamp = None
        self._deletion_grace_period_seconds = None
        self._labels = None
        self._annotations = None

    @property
    def name(self):
        """
        Name must be unique within a namespace; required when creating
        resources. Primarily intended for creation idempotence and
        configuration definition. Cannot be updated.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1ObjectMeta. :type: str"""
        self._name = name

    @property
    def generate_name(self):
        """
        Optional server-side prefix used to generate a unique name ONLY IF
        the Name field has not been provided; the returned name will differ
        from the value passed. Applied only if Name is not specified.

        :rtype: str
        """
        return self._generate_name

    @generate_name.setter
    def generate_name(self, generate_name):
        """Sets the generate_name of this V1ObjectMeta. :type: str"""
        self._generate_name = generate_name

    @property
    def namespace(self):
        """
        Namespace defines the space within which each name must be unique.
        Empty is equivalent to the "default" namespace. Must be a DNS_LABEL.
        Cannot be updated.

        :rtype: str
        """
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        """Sets the namespace of this V1ObjectMeta. :type: str"""
        self._namespace = namespace

    @property
    def self_link(self):
        """
        SelfLink is a URL representing this object. Populated by the
        system; read-only.

        :rtype: str
        """
        return self._self_link

    @self_link.setter
    def self_link(self, self_link):
        """Sets the self_link of this V1ObjectMeta. :type: str"""
        self._self_link = self_link

    @property
    def uid(self):
        """
        Unique-in-time-and-space value for this object, generated by the
        server on creation and not allowed to change on PUT. Read-only.

        :rtype: str
        """
        return self._uid

    @uid.setter
    def uid(self, uid):
        """Sets the uid of this V1ObjectMeta. :type: str"""
        self._uid = uid

    @property
    def resource_version(self):
        """
        Opaque value representing the internal version of this object; used
        for optimistic concurrency, change detection and watch. Populated
        by the system; read-only.

        :rtype: str
        """
        return self._resource_version

    @resource_version.setter
    def resource_version(self, resource_version):
        """Sets the resource_version of this V1ObjectMeta. :type: str"""
        self._resource_version = resource_version

    @property
    def generation(self):
        """
        Sequence number representing a specific generation of the desired
        state. Populated by the system; read-only.

        :rtype: int
        """
        return self._generation

    @generation.setter
    def generation(self, generation):
        """Sets the generation of this V1ObjectMeta. :type: int"""
        self._generation = generation

    @property
    def creation_timestamp(self):
        """
        Server time when this object was created, RFC3339 in UTC. Clients
        may not set this value. Read-only; null for lists.

        :rtype: str
        """
        return self._creation_timestamp

    @creation_timestamp.setter
    def creation_timestamp(self, creation_timestamp):
        """Sets the creation_timestamp of this V1ObjectMeta. :type: str"""
        self._creation_timestamp = creation_timestamp

    @property
    def deletion_timestamp(self):
        """
        RFC 3339 date and time at which this resource will be deleted. Set
        by the server on graceful deletion; not directly settable by a
        client. Read-only.

        :rtype: str
        """
        return self._deletion_timestamp

    @deletion_timestamp.setter
    def deletion_timestamp(self, deletion_timestamp):
        """Sets the deletion_timestamp of this V1ObjectMeta. :type: str"""
        self._deletion_timestamp = deletion_timestamp

    @property
    def deletion_grace_period_seconds(self):
        """
        Seconds allowed for graceful termination before removal. Only set
        when deletionTimestamp is also set. May only be shortened.
        Read-only.

        :rtype: int
        """
        return self._deletion_grace_period_seconds

    @deletion_grace_period_seconds.setter
    def deletion_grace_period_seconds(self, deletion_grace_period_seconds):
        """Sets deletion_grace_period_seconds of this V1ObjectMeta. :type: int"""
        self._deletion_grace_period_seconds = deletion_grace_period_seconds

    @property
    def labels(self):
        """
        Map of string keys and values used to organize and categorize
        (scope and select) objects. May match selectors of replication
        controllers and services.

        :rtype: str
        """
        return self._labels

    @labels.setter
    def labels(self, labels):
        """Sets the labels of this V1ObjectMeta. :type: str"""
        self._labels = labels

    @property
    def annotations(self):
        """
        Unstructured key/value map for external tooling metadata; not
        queryable and preserved when modifying objects.

        :rtype: str
        """
        return self._annotations

    @annotations.setter
    def annotations(self, annotations):
        """Sets the annotations of this V1ObjectMeta. :type: str"""
        self._annotations = annotations

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into values that
        themselves expose to_dict().
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """
        True when `other` is a V1ObjectMeta with equal attributes.

        Fix: previously `other.__dict__` was accessed unconditionally, so
        comparing against a non-model object raised AttributeError instead
        of returning False.
        """
        if not isinstance(other, V1ObjectMeta):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
#Quizzler Command Line Application #Author : Mabishi Elizabeth for Andela Kenya #Date: 5th May 2016 #Version 1.0 # Library to provide way of using operating system dependent functionality i.e.make directories import os # Library to manipulate JavaScript Object Notation files with python import json # Library to help randomise data import random #Library to enable printing and manipulation of time import time #Library to enable Command Line Interface Functionality import cmd #Enables progress bar functionality from tqdm import tqdm #Library to enable high level file functionality import shutil #Library to enhance User Experience with colours from colorama import Fore, Back, Style from colorama import init #Library to enhance User Experience with colours from termcolor import cprint #Library to enhance User Experience with colours and different text fonts from pyfiglet import figlet_format #Library to hook system specific parameters import sys #Library to prettify tables from prettytable import PrettyTable #Library to integrate with Firebase from firebase import firebase # Firebase database url to upload to and download quizzes from firebase_url = 'https://scorching-inferno-6139.firebaseio.com/' # Firebase|Python API to update(PATCH, PUT), create(POST), or remove(DELETE) stored data firebase = firebase.FirebaseApplication(firebase_url, None) this_path = os.path.dirname(os.path.abspath(__file__)) sounds_path = this_path + '/sounds/' quizzes_path = this_path + '/quizzes/' class Quiz(cmd.Cmd): """ Class that includes all Quizzler functions """ #Introduction script, runs when Quizzler begins prompt = '|Quizzler|' cprint(figlet_format('QUIZZLER!', font='cyberlarge'), 'yellow', 'on_red', attrs=['bold']) cprint(figlet_format('Test Yourself!', font='cyberlarge'), 'yellow', 'on_red', attrs=['bold']) init() # strip colors if stdout is redirected init(strip=not sys.stdout.isatty()) #os.system("start C:\QuizzlerSounds\CrowdCheer.wav") a = '*' b = '**' c = '***' d = '=' e = '-' 
print "-".center(78, e) print " " player_name = raw_input("What is your name Player?\n") print " " print "-".center(78, e) print c + " Greetings! {} ".format(player_name).center(74, d) + c print " " print Fore.YELLOW + c + " Welcome to Quizzler! ".center(74, d) + c print " " print c + " Give me a moment to load! ".center(74, d) + c time.sleep(0.5) for x in tqdm(range(20)): time.sleep(0.1) print " " print " " print Fore.YELLOW + " ".center(80, e) print Fore.YELLOW + " Use these commands to explore quizzler's functionality.".center(74) print " " time.sleep(0.5) print Fore.YELLOW + " COMMANDS ".center(78) print " ".center(80, e) #Make table green print Fore.GREEN + " " #Make table with Commands list x = PrettyTable(["Command", "Description"]) x.add_row(["help", "Displays all available commands and their descriptions"]) x.add_row(["help <command>", "Describes the command"]) x.add_row(["listquizzes", "Displays available local quizzes"]) x.add_row(["takequiz <quiz name>", "Launches the local quiz, quiz name"]) x.add_row([" listonline", "Display available online quizzes "]) x.add_row(["download <quiz source path>", "Add quiz to local collection from online source"]) x.add_row(["import <quiz source path>", "Add quiz to local collection from external source "]) x.add_row(["upload <quiz source path>", "Add quiz from local collection to online database "]) print x print " " #Rest colorama colours print(Style.RESET_ALL) def do_listquizzes(self, line): """ DESCRIPTION: List all local quizzes in Quizzler library USAGE: Command : listquizzes """ #path_to_quizzes = 'C:\\Quizzler\\Quizzes' #Check whether the folder structure exists, if it does not, create it if os.path.exists(quizzes_path) == False: os.makedirs(quizzes_path) #Filesize of new empty folder = 0 if os.path.getsize(quizzes_path) == 0: print "You currently have no quizzes. Please import or download a quiz. See help for details." 
elif os.path.getsize(quizzes_path) != 0: print "***" + "These are your local quizzes".center(74, "*") + "***" for file in os.listdir(quizzes_path): #Check whether file is .json files if file.endswith(".json"): #if it is, print it out without the .json extension print ("="*37) + " " + ((file)[:len(file) - 5]) + " " + ("="*37) time.sleep(1) #User tip print "\nTip: Use command 'takequiz <quizname> to begin taking a quiz\n".center(74," ") #Add some styling print " ".center(80,"*") def do_takequiz(self, quiz_name): """ DESCRIPTION: Begin taking a quiz. USAGE: Command : takequiz <quiz name> Start taking a new quiz of quiz name """ quiz_name = quiz_name.upper() path_to_quiz = quizzes_path + quiz_name +'.json' try: #If quiz_name given by user is in the basename of the quiz: Allows for user errors if quiz_name in os.path.basename(path_to_quiz): #use json load function to convert to list with open(path_to_quiz) as quiz: quiz_data = json.load(quiz) #Pick questions in .json file questions = quiz_data.keys() #Shuffle questions in quiz random.shuffle(questions) #Start quiz #Set initial score to Zero score = 0 #Monitor number of questions asked position = 0 #Start timing now start_time = time.time() #default duration set : 10seconds * number of questions duration = 10 * len(questions) #While the position variable is less than the number of questions, while position < len(questions): #return a question in the quiz print questions[position] # Add Space for readability print " " #There's still time left out_of_time = False #Prompt user for an answer user_answer = raw_input("Please enter your answer.\n") elapsed = time.time() - start_time #Stop quiz if time is spent if elapsed > duration: out_of_time == True print Fore.RED +"Sorry! Your time's up!" 
print(Style.RESET_ALL) break #Add space for readability print " " #Every time a question is answered print out the time left for the quiz print "time remaining: %.f seconds" % (duration - elapsed) #Check if answer is correct and return appropriate response correct_answer = str(quiz_data[(questions[position])]) if user_answer.upper() == correct_answer.upper(): print " " print Fore.GREEN +"Your answer is correct! \n" #Play this sound #os.system("start C:\QuizzlerSounds\AudienceApplause.wav") print(Style.RESET_ALL) print " " score += 1 print "Your score is {}".format(score) else: print " " print Fore.RED +"Your answer is incorrect \n" print(Style.RESET_ALL) print "Your score is {}".format(score) print " " #A question has been attempted, increment the position variable position += 1 #Questions are over if position == len(questions): print "Your total score is {}".format(score) print "\nQuestions in module over. Please take another quiz".center(78,"-") print " \n" print "Use <listquizzes> to see your list of local quizzes or <help> to view options.\n".center(74, "-") #If quiz does not exist, except IOError: print "Invalid response.Quiz does not exist. Please try again. Use takequiz <quiz name>." print " " def do_import(self, src): """ DESCRIPTION: Import quiz from external location other than the internet. Use for external and internal storage locations USAGE: Command : import <quiz source path> """ #local_destination = 'C:\\Quizzler\\Quizzes' #If folder does not exist, create it if os.path.exists(quizzes_path) == False: os.makedirs(quizzes_path) try: #Copy json file from source to destination shutil.copy((src + '.json'),quizzes_path) print "Quiz successfully imported to local quiz folder" #Print Error message showing user that source and local path are the same except shutil.Error: print "Error! 
Source path and local_destination are the same\n".center(74, "-") print "Please attempt import again.".center(74, "-") #Print Error message showing user that source destination does not exist except IOError: print "Source destination does not exist" def do_listonline(self, online_quizzes): """ DESCRIPTION: List quizzes stored online USAGE: Command: listonline """ # Perform a get request and read the online quizzes folder in Firebase #Quizzes stored under folder Quiz in Firebase online_quizzes = firebase.get('/Quiz', None) #Quiz holds quizzes # Loop over the onlinequizzes to list them for quiz in online_quizzes: # Print quiz from firebase db print quiz print " " def do_download(self, quiz_name): """ DESCRIPTION: Download quizzes stored in the online database USAGE: Command: download <quiz name> """ quiz_name = quiz_name.upper() # Url for the selected json file in the Firebase database download_url = firebase_url + '/Quiz/' + quiz_name # Folder to store downloaded quiz #destination_folder = "C:\\Quizzler\\Quizzes" file_path = quizzes_path + quiz_name + '.json' # Check if destination folder exists and create it if it does not exist if os.path.exists(quizzes_path) == False: os.makedirs(quizzes_path) #Check if file already exists if os.path.isfile(file_path) == True: print "Quiz already exists in local quiz folder" else: #use get request to Firebase to get the quiz result = firebase.get('/Quiz/' + quiz_name, None) #Copy the quiz try: with open(file_path, 'w') as fp: json.dump(result, fp) print "-" + "Downloading file".center(74,"*") + "-" except: print "\nError! Quiz failed to download! 
Please try again\n".center(74, "*") print "To download quiz please type: downloadquiz <quiz_name>".center(74, "*") def do_upload(self, quiz_source_path): """ DESCRIPTION: Upload quiz to online Firebase database USAGE: Command: upload <quiz source path> """ quiz_full_path = quiz_source_path + ".json" quiz_name = os.path.basename(quiz_source_path) quiz_name_to_post = str(quiz_name) #If file exists, upload if os.path.isfile(quiz_full_path) == True: print "File existence confirmed".center(74, "-") #Space for readability print " " # Write the contents to json file with open(quiz_full_path, 'r') as json_file: try: # Use json.load to move contents quiz = json.load(json_file) print "Uploading Quiz {}".format(quiz_name).center(74,"-") for n in tqdm(range(10)): time.sleep(0.5) # Call a put request and save to the firebase database firebase.put("/Quiz/",quiz_name, quiz) print "\nQuiz successfully uploaded!\n".center(74,"-") print " " except: print "Upload failed! Please type upload<quiz_name> to try again.\n".center(74,"-") print "Type help<uploadquiz> for help." else: print "Quiz does not exist at source.".center(74,"-") #def do_applause(self,line): #Play wav file #os.system("start C:\QuizzlerSounds\AudienceApplause.wav") def do_EOF(self, line): return True if __name__ == '__main__': Quiz().cmdloop()
import logging
LOG = logging.getLogger(__name__)

# Parameter operations. Just needed if we would get the paramset-descriptions
# to do some auto-configuration magic.
PARAM_OPERATION_READ = 1
PARAM_OPERATION_WRITE = 2
PARAM_OPERATION_EVENT = 4

PARAM_UNREACH = 'UNREACH'
PARAMSET_VALUES = 'VALUES'


class HMGeneric():
    """Base class for HomeMatic devices and channels.

    Wraps the device description dict delivered by the CCU/backend and
    caches VALUES-paramset data updated through event().
    """
    # pylint: disable=unused-argument
    def __init__(self, device_description, proxy, resolveparamsets):
        # These properties are available for every device and its channels
        self._ADDRESS = device_description.get('ADDRESS')
        LOG.debug("HMGeneric.__init__: device_description: " + str(self._ADDRESS) + " : " + str(device_description))
        self._FAMILY = device_description.get('FAMILY')
        self._FLAGS = device_description.get('FLAGS')
        self._ID = device_description.get('ID')
        self._PARAMSETS = device_description.get('PARAMSETS')
        self._PARAMSET_DESCRIPTIONS = {}
        self._TYPE = device_description.get('TYPE')
        self._VERSION = device_description.get('VERSION')
        self._proxy = proxy
        self._paramsets = {}
        self._eventcallbacks = []
        self._name = None
        # Dictionary to cache values. They are updated in the event() function.
        self._VALUES = {}
        self._VALUES[PARAM_UNREACH] = None

    @property
    def ADDRESS(self):
        return self._ADDRESS

    @property
    def TYPE(self):
        return self._TYPE

    @property
    def PARAMSETS(self):
        return self._paramsets

    @property
    def NAME(self):
        return self._name

    @NAME.setter
    def NAME(self, name):
        self._name = name

    def event(self, interface_id, key, value):
        """ Handle the event received by server. """
        LOG.debug(
            "HMGeneric.event: address=%s, interface_id=%s, key=%s, value=%s",
            self._ADDRESS, interface_id, key, value)
        self._VALUES[key] = value  # Cache the value
        for callback in self._eventcallbacks:
            LOG.debug("HMGeneric.event: Using callback %s", str(callback))
            callback(self._ADDRESS, interface_id, key, value)

    def getParamsetDescription(self, paramset):
        """
        Descriptions for paramsets are available to determine what can be
        done with the device.

        Returns False on error; returns None on success (kept as-is for
        backward compatibility with callers that may test `is None`).
        """
        try:
            self._PARAMSET_DESCRIPTIONS[paramset] = self._proxy.getParamsetDescription(self._ADDRESS, paramset)
        except Exception as err:
            LOG.error("HMGeneric.getParamsetDescription: Exception: %s", err)
            return False

    def updateParamset(self, paramset):
        """
        Devices should not update their own paramsets. They rely on the state
        of the server. Hence we pull the specified paramset.
        Returns True when the paramset was fetched, False otherwise.
        """
        try:
            if paramset:
                if self._proxy:
                    returnset = self._proxy.getParamset(self._ADDRESS, paramset)
                    if returnset:
                        self._paramsets[paramset] = returnset
                        if self.PARAMSETS:
                            if self.PARAMSETS.get(PARAMSET_VALUES):
                                # Keep the cached UNREACH flag in sync with the
                                # freshly pulled VALUES paramset.
                                self._VALUES[PARAM_UNREACH] = self.PARAMSETS.get(PARAMSET_VALUES).get(PARAM_UNREACH)
                        return True
            return False
        except Exception as err:
            LOG.debug("HMGeneric.updateParamset: Exception: %s, %s, %s" % (str(err), str(self._ADDRESS), str(paramset)))
            return False

    def updateParamsets(self):
        """
        Devices should update their own paramsets. They rely on the state
        of the server. Hence we pull all paramsets.
        """
        try:
            for ps in self._PARAMSETS:
                self.updateParamset(ps)
            return True
        except Exception as err:
            LOG.error("HMGeneric.updateParamsets: Exception: %s", err)
            return False

    def putParamset(self, paramset, data=None, rx_mode=None):
        """
        Some devices act upon changes to paramsets.
        A "putted" paramset must not contain all keys available in the
        specified paramset, just the ones which are writable and should be
        changed.

        FIX: the default for `data` was a shared mutable dict ({}); it is now
        None. Behavior is unchanged because an empty/None `data` short-circuits
        the write either way.
        """
        try:
            if paramset in self._PARAMSETS and data:
                if rx_mode is None:
                    self._proxy.putParamset(self._ADDRESS, paramset, data)
                else:
                    self._proxy.putParamset(self._ADDRESS, paramset, data, rx_mode)
                # We update all paramsets to at least have a temporarily
                # accurate state for the device. This might not be true for
                # tasks that take long to complete (lifting a rollershutter
                # completely etc.). For this the server-process has to call
                # the updateParamsets-method when it receives events for the
                # device.
                self.updateParamsets()
                return True
            else:
                return False
        except Exception as err:
            LOG.error("HMGeneric.putParamset: Exception: %s", err)
            return False


class HMChannel(HMGeneric):
    """A single channel of a HomeMatic device."""

    def __init__(self, device_description, proxy, resolveparamsets=False):
        super().__init__(device_description, proxy, resolveparamsets)

        # These properties only exist for device-channels
        self._PARENT = device_description.get('PARENT')
        self._AES_ACTIVE = device_description.get('AES_ACTIVE')
        self._DIRECTION = device_description.get('DIRECTION')
        self._INDEX = device_description.get('INDEX')
        self._LINK_SOURCE_ROLES = device_description.get('LINK_SOURCE_ROLES')
        self._LINK_TARGET_ROLES = device_description.get('LINK_TARGET_ROLES')
        self._PARENT_TYPE = device_description.get('PARENT_TYPE')

        # We set the name to the parents address initially
        self._name = device_description.get('ADDRESS')

        # Optional properties of device-channels
        self._GROUP = device_description.get('GROUP')
        self._TEAM = device_description.get('TEAM')
        self._TEAM_TAG = device_description.get('TEAM_TAG')
        self._TEAM_CHANNELS = device_description.get('TEAM_CHANNELS')

        # Not in specification, but often present
        self._CHANNEL = device_description.get('CHANNEL')

        if resolveparamsets:
            self.updateParamsets()

    def getCachedOrUpdatedValue(self, key):
        """
        Gets the device's value with the given key.
        If the key is not found in the cache, the value is queried from the
        host.
        """
        try:
            return self._VALUES[key]
        except KeyError:
            return self.getValue(key)

    @property
    def PARENT(self):
        return self._PARENT

    @property
    def UNREACH(self):
        """ Returns true if children is not reachable """
        return bool(self._VALUES.get(PARAM_UNREACH, False))

    def setEventCallback(self, callback):
        """
        Set additional event callbacks for the channel.
        Signature for callback-functions: foo(address, interface_id, key, value).
        """
        if hasattr(callback, '__call__'):
            self._eventcallbacks.append(callback)

    def setValue(self, key, value):
        """
        Some devices allow to directly set values to perform a specific task.
        """
        LOG.debug("HMGeneric.setValue: address = '%s', key = '%s' value = '%s'", self._ADDRESS, key, value)
        try:
            self._proxy.setValue(self._ADDRESS, key, value)
            return True
        except Exception as err:
            LOG.error("HMGeneric.setValue: %s on %s Exception: %s", key, self._ADDRESS, err)
            return False

    def getValue(self, key):
        """
        Some devices allow to directly get values for specific parameters.
        Returns False on error (kept for backward compatibility).
        """
        LOG.debug("HMGeneric.getValue: address = '%s', key = '%s'", self._ADDRESS, key)
        try:
            returnvalue = self._proxy.getValue(self._ADDRESS, key)
            self._VALUES[key] = returnvalue
            return returnvalue
        except Exception as err:
            LOG.info("HMGeneric.getValue: %s on %s Exception: %s", key, self._ADDRESS, err)
            return False


class HMDevice(HMGeneric):
    """A HomeMatic device: holds its channels and node metadata."""

    def __init__(self, device_description, proxy, resolveparamsets=False):
        super().__init__(device_description, proxy, resolveparamsets)

        self._hmchannels = {}

        # Data point information
        # "NODE_NAME": channel
        # for channel is possible:
        # - c / getVaule from channel (dynamic)
        # - 0...n / getValue from channel (fix)
        self._SENSORNODE = {}
        self._BINARYNODE = {}
        self._ATTRIBUTENODE = {}
        self._WRITENODE = {}
        self._EVENTNODE = {}
        self._ACTIONNODE = {}

        # These properties only exist for interfaces themselves
        self._CHILDREN = device_description.get('CHILDREN')
        self._RF_ADDRESS = device_description.get('RF_ADDRESS')

        # We set the name to the address initially
        self._name = device_description.get('ADDRESS')

        # Optional properties might not always be present
        if 'CHANNELS' in device_description:
            self._CHANNELS = device_description['CHANNELS']
        else:
            self._CHANNELS = []

        self._PHYSICAL_ADDRESS = device_description.get('PHYSICAL_ADDRESS')
        self._INTERFACE = device_description.get('INTERFACE')
        self._ROAMING = device_description.get('ROAMING')
        self._RX_MODE = device_description.get('RX_MODE')
        self._FIRMWARE = device_description.get('FIRMWARE')
        self._AVAILABLE_FIRMWARE = device_description.get('AVAILABLE_FIRMWARE')
        self._UPDATABLE = device_description.get('UPDATABLE')
        self._PARENT_TYPE = None

    def getCachedOrUpdatedValue(self, key, channel=None):
        """
        Gets the channel's value with the given key.
        If the key is not found in the cache, the value is queried from the
        host. If 'channel' is given, the respective channel's value is
        returned.
        """
        if channel:
            return self._hmchannels[channel].getCachedOrUpdatedValue(key)
        try:
            return self._VALUES[key]
        except KeyError:
            value = self._VALUES[key] = self.getValue(key)
            return value

    @property
    def UNREACH(self):
        """ Returns true if the device or any children is not reachable """
        if self._VALUES.get(PARAM_UNREACH, False):
            return True
        else:
            for device in self._hmchannels.values():
                if device.UNREACH:
                    return True
        return False

    @property
    def CHANNELS(self):
        return self._hmchannels

    @property
    def SENSORNODE(self):
        return self._SENSORNODE

    @property
    def BINARYNODE(self):
        return self._BINARYNODE

    @property
    def ATTRIBUTENODE(self):
        return self._ATTRIBUTENODE

    @property
    def WRITENODE(self):
        return self._WRITENODE

    @property
    def EVENTNODE(self):
        return self._EVENTNODE

    @property
    def ACTIONNODE(self):
        return self._ACTIONNODE

    def getAttributeData(self, name, channel=None):
        """ Returns a attribut """
        return self._getNodeData(name, self._ATTRIBUTENODE, channel)

    def getBinaryData(self, name, channel=None):
        """ Returns a binary node """
        return self._getNodeData(name, self._BINARYNODE, channel)

    def getSensorData(self, name, channel=None):
        """ Returns a sensor node """
        return self._getNodeData(name, self._SENSORNODE, channel)

    def getWriteData(self, name, channel=None):
        """ Returns a sensor node """
        return self._getNodeData(name, self._WRITENODE, channel)

    def _getNodeData(self, name, metadata, channel=None):
        """ Returns a data point from data"""
        nodeChannel = None
        if name in metadata:
            nodeChannelList = metadata[name]
            if len(nodeChannelList) > 1:
                nodeChannel = channel if channel is not None else nodeChannelList[0]
            elif len(nodeChannelList) == 1:
                nodeChannel = nodeChannelList[0]
            else:
                LOG.warning("HMDevice._getNodeData: %s not found in %s, empty nodeChannelList" % (name, metadata))
                return None
            if nodeChannel is not None and nodeChannel in self.CHANNELS:
                return self._hmchannels[nodeChannel].getValue(name)

        LOG.error("HMDevice._getNodeData: %s not found in %s" % (name, metadata))
        return None

    def writeNodeData(self, name, data, channel=None):
        return self._setNodeData(name, self.WRITENODE, data, channel)

    def actionNodeData(self, name, data, channel=None):
        return self._setNodeData(name, self.ACTIONNODE, data, channel)

    def _setNodeData(self, name, metadata, data, channel=None):
        """ Returns a data point from data"""
        nodeChannel = None
        if name in metadata:
            nodeChannelList = metadata[name]
            if len(nodeChannelList) > 1:
                nodeChannel = channel if channel is not None else nodeChannelList[0]
            elif len(nodeChannelList) == 1:
                nodeChannel = nodeChannelList[0]

            if nodeChannel is not None and nodeChannel in self.CHANNELS:
                return self._hmchannels[nodeChannel].setValue(name, data)

        # FIX: this log line used "%i" % nodeChannel, which raised TypeError
        # inside the error path whenever nodeChannel was None (e.g. unknown
        # name) -- use %s so the error is logged instead of crashing.
        LOG.error("HMDevice.setNodeData: %s not found with value %s on %s",
                  name, data, nodeChannel)
        return False

    def get_rssi(self, channel=0):
        """
        This is a stub method which is implemented by the helpers
        HelperRssiPeer/HelperRssiDevice in order to provide a suitable
        implementation for the device.
        """
        #pylint: disable=unused-argument
        return 0

    @property
    def ELEMENT(self):
        """
        Returns count of elements for same functionality.
        Overwrite this value only if you have a special device such as Sw2 etc.
        """
        return [1]

    def setEventCallback(self, callback, bequeath=True, channel=0):
        """
        Set additional event callbacks for the device.
        Set the callback for specific channels or use the device itself and
        let it bequeath the callback to all of its children.
        Signature for callback-functions: foo(address, interface_id, key, value)
        """
        if hasattr(callback, '__call__'):
            if channel == 0:
                self._eventcallbacks.append(callback)
            elif not bequeath and channel > 0 and channel in self._hmchannels:
                self._hmchannels[channel]._eventcallbacks.append(callback)
            if bequeath:
                for channel, device in self._hmchannels.items():
                    device._eventcallbacks.append(callback)

    def setValue(self, key, value, channel=1):
        """
        Some devices allow to directly set values to perform a specific task.
        """
        if channel in self.CHANNELS:
            return self.CHANNELS[channel].setValue(key, value)
        # FIX: was "%i" % channel, which itself raised TypeError for
        # non-integer channel arguments instead of logging the problem.
        LOG.error("HMDevice.setValue: channel not found %s!", channel)

    def getValue(self, key, channel=1):
        """
        Some devices allow to directly get values for specific parameters.
        """
        if channel in self.CHANNELS:
            return self.CHANNELS[channel].getValue(key)
        # FIX: same "%i" -> "%s" hardening as setValue above.
        LOG.error("HMDevice.getValue: channel not found %s!", channel)
"""Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import tensorflow as tf import numpy as np import PIL.Image import os from multiprocessing import dummy as multiprocessing import saliency ROW_MIN = 0 COL_MIN = 1 ROW_MAX = 2 COL_MAX = 3 # Same as in https://github.com/tensorflow/models/blob/master/official/resnet/imagenet_preprocessing.py _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 _CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN] SAL_DIR = 'sal_scratch' ATTR_DIR = 'attr_scratch' RESNET_SHAPE = (224, 224) IMG_SHAPE = (128, 128) def load_imgs(fnames, num_threads=1, shape=RESNET_SHAPE): """Load and preprocess images for ResNet by subtracting means.""" def load_img(f): img = PIL.Image.open(tf.gfile.Open(f, 'rb')) img = img.convert('RGB').resize(shape, PIL.Image.BILINEAR) channel_means = np.expand_dims(np.expand_dims(_CHANNEL_MEANS, 0), 0) img_arr = np.array(img, dtype=np.float32) - channel_means.astype(np.float32) return img_arr pool = multiprocessing.Pool(num_threads) return pool.map(load_img, fnames) def visualize_pos_attr(image_3d, percentile=99): """Returns a 3D tensor as a grayscale 2D tensor by summing the positive attributions of a 3D tensor across axis=2, and then clips values at a given percentile.""" image_2d = np.sum(image_3d.clip(min=0), axis=2) vmax = np.percentile(image_2d, percentile) vmin = np.min(image_2d) return np.clip((image_2d - vmin) / (vmax - vmin), 0, 1) def single_attr(map_2d, loc, obj_mask): """Given a 2D saliency map, the location of an object, 
and the binary mask of that object, compute the attribution of the object by averaging over its pixel-wise attributions.""" obj_mask_resized = np.array( PIL.Image.fromarray(obj_mask).resize( (loc[COL_MAX] - loc[COL_MIN], loc[ROW_MAX] - loc[ROW_MIN]), PIL.Image.BILINEAR)) avg = np.sum( map_2d[:, loc[ROW_MIN]:loc[ROW_MAX], loc[COL_MIN]:loc[COL_MAX]] * obj_mask_resized, axis=(-1, -2)) / np.count_nonzero(obj_mask_resized) return avg def compute_and_save_attr(model, data, indices, num_threads): """Given the name of a model and a set of data, select a set of images based on provided indices, and compute and save their saliency maps and object attributions.""" base_dir = os.getcwd() data_dir = os.path.join(base_dir, 'data', data, 'val') model_dir = os.path.join(base_dir, 'models', model) sal_output_dir = os.path.join(base_dir, SAL_DIR, model + '-' + data) attr_output_dir = os.path.join(base_dir, ATTR_DIR, model + '-' + data) if not tf.gfile.Exists(sal_output_dir): tf.gfile.MakeDirs(sal_output_dir) if not tf.gfile.Exists(attr_output_dir): tf.gfile.MakeDirs(attr_output_dir) img_names = [sorted(tf.gfile.ListDirectory(data_dir))[i] for i in indices] img_paths = [os.path.join(data_dir, img_name) for img_name in img_names] imgs = load_imgs(img_paths, num_threads) input_name = 'input_tensor:0' logit_name = 'resnet_model/final_dense:0' conv_name = 'resnet_model/block_layer4:0' with tf.Session(graph=tf.Graph()) as sess: tf.saved_model.loader.load(sess, ['serve'], model_dir) graph = tf.get_default_graph() input_tensor = graph.get_tensor_by_name(input_name) logit_tensor = graph.get_tensor_by_name(logit_name) neuron_selector = tf.placeholder(tf.int32) y = logit_tensor[:, neuron_selector] pred_tensor = tf.argmax(logit_tensor, 1) vg = saliency.GradientSaliency(graph, sess, y, input_tensor) gb = saliency.GuidedBackprop(graph, sess, y, input_tensor) ig = saliency.IntegratedGradients(graph, sess, y, input_tensor) gc = saliency.GradCam(graph, sess, y, input_tensor, 
graph.get_tensor_by_name(conv_name)) def single_map(img, img_name): pred = sess.run(pred_tensor, feed_dict={input_tensor: [img]})[0] vg_mask = vg.GetMask(img, feed_dict={neuron_selector: pred}) # *s is SmoothGrad vgs_mask = vg.GetSmoothedMask( img, feed_dict={neuron_selector: pred}) gb_mask = gb.GetMask(img, feed_dict={neuron_selector: pred}) gbs_mask = gb.GetSmoothedMask( img, feed_dict={neuron_selector: pred}) baseline = np.zeros(img.shape) - np.expand_dims( np.expand_dims(_CHANNEL_MEANS, 0), 0) ig_mask = ig.GetMask( img, feed_dict={neuron_selector: pred}, x_baseline=baseline) igs_mask = ig.GetSmoothedMask( img, feed_dict={neuron_selector: pred}, x_baseline=baseline) gc_mask = gc.GetMask(img, feed_dict={neuron_selector: pred}) gcs_mask = gc.GetSmoothedMask( img, feed_dict={neuron_selector: pred}) # gbgc is guided GradCam gbgc_mask = gb_mask * gc_mask gbgcs_mask = gbs_mask * gcs_mask # Also include gradient x input masks = np.array([ vg_mask, vgs_mask, gb_mask, gbs_mask, ig_mask, igs_mask, gc_mask, gcs_mask, gbgc_mask, gbgcs_mask, vg_mask * img, vgs_mask * img ]) return masks, pred sal_maps = [] preds = [] for img, img_name in zip(imgs, img_names): sal_path = tf.gfile.Glob( os.path.join(sal_output_dir, img_name[:-4] + '*')) if len(sal_path) > 0: sal_maps.append(np.load(tf.gfile.GFile(sal_path[0], 'rb'))) preds.append(sal_path[0].split('_')[-1]) tf.logging.info('Loaded saliency maps for {}.'.format(img_name)) else: masks, pred = single_map(img, img_name) sal_maps.append(masks) preds.append(pred) out_path = os.path.join(sal_output_dir, img_name[:-4] + '_' + str(pred)) np.save(tf.gfile.GFile(out_path, 'w'), masks) tf.logging.info('Saved saliency maps for {}.'.format(img_name)) # Locate the objects, convert 3D saliency maps to 2D, and compute # the attributions of the object segments by averaging over the # per-pixel attributions of the objects. 
loc_fpath = os.path.join(base_dir, 'data', data, 'val_loc.txt') lines = [tf.gfile.Open(loc_fpath).readlines()[i] for i in indices] locs = np.array([[ int(int(l) * float(RESNET_SHAPE[0]) / IMG_SHAPE[0]) for l in line.rstrip('\n').split(' ')[-1].split(',') ] for line in lines]) pool = multiprocessing.Pool(num_threads) maps_3d = np.array(sal_maps).reshape(-1, RESNET_SHAPE[0], RESNET_SHAPE[1], 3) maps_2d = np.array(pool.map(visualize_pos_attr, maps_3d)) maps_2d = maps_2d.reshape( len(indices), int(maps_2d.shape[0] // len(indices)), RESNET_SHAPE[0], RESNET_SHAPE[1]) mask_fpath = os.path.join(base_dir, 'data', data, 'val_mask') if data in ['obj', 'scene', 'scene_only']: # MCS and IDR are evaluated on 10000 images and masks are 10x100. # Find the right mask. obj_dict = {'backpack': 0, 'bird': 1, 'dog': 2, 'elephant': 3, 'kite': 4, 'pizza': 5, 'stop_sign': 6, 'toilet': 7, 'truck': 8, 'zebra': 9, } # Loading val_mask from the data directory masks_mat = np.load(tf.gfile.GFile(mask_fpath, 'rb'), allow_pickle=True) # Getting obj indices obj_inds = [obj_dict[i.split('.')[0].split('-')[0]] for i in img_names] # getting indices for a particular object class temp_inds = [int(i.split('.')[0][-2:]) for i in img_names] obj_masks = [masks_mat[obj_inds[i]*100 + temp_inds[i]] for i, _ in enumerate(img_names)] else: obj_masks = [ np.load(tf.gfile.GFile(mask_fpath, 'rb'), allow_pickle=True)[i] for i in indices ] attrs = [] for i in range(len(indices)): attr = single_attr(maps_2d[i], locs[i], obj_masks[i]) attrs.append(attr) out_path = os.path.join(attr_output_dir, img_names[i][:-4] + '_' + str(preds[i])) np.save(tf.gfile.GFile(out_path, 'w'), attr)
#!/usr/bin/env python from __future__ import absolute_import, division, print_function, with_statement import base64 import binascii from contextlib import closing import functools import sys import threading from tornado.escape import utf8 from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop from tornado.iostream import IOStream from tornado import netutil from tornado.stack_context import ExceptionStackContext, NullContext from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test from tornado.test.util import unittest from tornado.util import u, bytes_type from tornado.web import Application, RequestHandler, url try: from io import BytesIO # python 3 except ImportError: from cStringIO import StringIO as BytesIO class HelloWorldHandler(RequestHandler): def get(self): name = self.get_argument("name", "world") self.set_header("Content-Type", "text/plain") self.finish("Hello %s!" 
% name) class PostHandler(RequestHandler): def post(self): self.finish("Post arg1: %s, arg2: %s" % ( self.get_argument("arg1"), self.get_argument("arg2"))) class ChunkHandler(RequestHandler): def get(self): self.write("asdf") self.flush() self.write("qwer") class AuthHandler(RequestHandler): def get(self): self.finish(self.request.headers["Authorization"]) class CountdownHandler(RequestHandler): def get(self, count): count = int(count) if count > 0: self.redirect(self.reverse_url("countdown", count - 1)) else: self.write("Zero") class EchoPostHandler(RequestHandler): def post(self): self.write(self.request.body) class UserAgentHandler(RequestHandler): def get(self): self.write(self.request.headers.get('User-Agent', 'User agent not set')) class ContentLength304Handler(RequestHandler): def get(self): self.set_status(304) self.set_header('Content-Length', 42) def _clear_headers_for_304(self): # Tornado strips content-length from 304 responses, but here we # want to simulate servers that include the headers anyway. pass # These tests end up getting run redundantly: once here with the default # HTTPClient implementation, and then again in each implementation's own # test suite. 
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    """Implementation-agnostic HTTP client tests, run against the handlers
    defined above via AsyncHTTPTestCase's in-process server."""

    def get_app(self):
        # gzip=True so the server side of compression is exercised too.
        return Application([
            url("/hello", HelloWorldHandler),
            url("/post", PostHandler),
            url("/chunk", ChunkHandler),
            url("/auth", AuthHandler),
            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
            url("/echopost", EchoPostHandler),
            url("/user_agent", UserAgentHandler),
            url("/304_with_content_length", ContentLength304Handler),
        ], gzip=True)

    def test_hello_world(self):
        response = self.fetch("/hello")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["Content-Type"], "text/plain")
        self.assertEqual(response.body, b"Hello world!")
        # request_time is a float in seconds; a local round trip is < 1s.
        self.assertEqual(int(response.request_time), 0)

        response = self.fetch("/hello?name=Ben")
        self.assertEqual(response.body, b"Hello Ben!")

    def test_streaming_callback(self):
        # streaming_callback is also tested in test_chunked
        chunks = []
        response = self.fetch("/hello",
                              streaming_callback=chunks.append)
        # with streaming_callback, data goes to the callback and not response.body
        self.assertEqual(chunks, [b"Hello world!"])
        self.assertFalse(response.body)

    def test_post(self):
        response = self.fetch("/post", method="POST",
                              body="arg1=foo&arg2=bar")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")

    def test_chunked(self):
        # Without a streaming_callback the chunks are concatenated into body.
        response = self.fetch("/chunk")
        self.assertEqual(response.body, b"asdfqwer")

        chunks = []
        response = self.fetch("/chunk",
                              streaming_callback=chunks.append)
        self.assertEqual(chunks, [b"asdf", b"qwer"])
        self.assertFalse(response.body)

    def test_chunked_close(self):
        # test case in which chunks spread read-callback processing
        # over several ioloop iterations, but the connection is already closed.
        sock, port = bind_unused_port()
        with closing(sock):
            def write_response(stream, request_data):
                # Hand-rolled chunked response ("1" and "2"), closed as soon
                # as the write completes so the final chunk and the close
                # arrive together.
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(b"\n", b"\r\n"), callback=stream.close)

            def accept_callback(conn, address):
                # fake an HTTP server using chunked encoding where the final chunks
                # and connection close all happen at once
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.body, b"12")
            self.io_loop.remove_handler(sock.fileno())

    def test_streaming_stack_context(self):
        # An exception raised in streaming_callback must be caught by the
        # surrounding ExceptionStackContext, not swallowed by the client.
        chunks = []
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def streaming_cb(chunk):
            chunks.append(chunk)
            if chunk == b'qwer':
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', streaming_callback=streaming_cb)

        self.assertEqual(chunks, [b'asdf', b'qwer'])
        self.assertEqual(1, len(exc_info))
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_basic_auth(self):
        # "QWxhZGRpbjpvcGVuIHNlc2FtZQ==" is base64("Aladdin:open sesame"),
        # the canonical RFC 2617 example.
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_follow_redirect(self):
        response = self.fetch("/countdown/2", follow_redirects=False)
        self.assertEqual(302, response.code)
        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))

        response = self.fetch("/countdown/2")
        self.assertEqual(200, response.code)
        self.assertTrue(response.effective_url.endswith("/countdown/0"))
        self.assertEqual(b"Zero", response.body)

    def test_credentials_in_url(self):
        # userinfo embedded in the URL should become a Basic auth header.
        url = self.get_url("/auth").replace("http://", "http://me:secret@")
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
                         response.body)

    def test_body_encoding(self):
        unicode_body = u("\xe9")
        byte_body = binascii.a2b_hex(b"e9")

        # unicode string in body gets converted to utf8
        response = self.fetch("/echopost", method="POST", body=unicode_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "2")
        self.assertEqual(response.body, utf8(unicode_body))

        # byte strings pass through directly
        response = self.fetch("/echopost", method="POST",
                              body=byte_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

        # Mixing unicode in headers and byte string bodies shouldn't
        # break anything
        response = self.fetch("/echopost", method="POST", body=byte_body,
                              headers={"Content-Type": "application/blah"},
                              user_agent=u("foo"))
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

    def test_types(self):
        # Body is bytes; header values, codes, and URLs keep native types.
        response = self.fetch("/hello")
        self.assertEqual(type(response.body), bytes_type)
        self.assertEqual(type(response.headers["Content-Type"]), str)
        self.assertEqual(type(response.code), int)
        self.assertEqual(type(response.effective_url), str)

    def test_header_callback(self):
        first_line = []
        headers = {}
        chunks = []

        def header_callback(header_line):
            if header_line.startswith('HTTP/'):
                first_line.append(header_line)
            elif header_line != '\r\n':
                k, v = header_line.split(':', 1)
                headers[k] = v.strip()

        def streaming_callback(chunk):
            # All header callbacks are run before any streaming callbacks,
            # so the header data is available to process the data as it
            # comes in.
            self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
            chunks.append(chunk)

        self.fetch('/chunk', header_callback=header_callback,
                   streaming_callback=streaming_callback)
        self.assertEqual(len(first_line), 1)
        self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
        self.assertEqual(chunks, [b'asdf', b'qwer'])

    def test_header_callback_stack_context(self):
        # Errors in header_callback also propagate to the stack context.
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def header_callback(header_line):
            if header_line.startswith('Content-Type:'):
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', header_callback=header_callback)
        self.assertEqual(len(exc_info), 1)
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_configure_defaults(self):
        defaults = dict(user_agent='TestDefaultUserAgent')
        # Construct a new instance of the configured client class
        client = self.http_client.__class__(self.io_loop, force_instance=True,
                                            defaults=defaults)
        client.fetch(self.get_url('/user_agent'), callback=self.stop)
        response = self.wait()
        self.assertEqual(response.body, b'TestDefaultUserAgent')
        client.close()

    def test_304_with_content_length(self):
        # According to the spec 304 responses SHOULD NOT include
        # Content-Length or other entity headers, but some servers do it
        # anyway.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        response = self.fetch('/304_with_content_length')
        self.assertEqual(response.code, 304)
        self.assertEqual(response.headers['Content-Length'], '42')

    def test_final_callback_stack_context(self):
        # The final callback should be run outside of the httpclient's
        # stack_context.  We want to ensure that there is not stack_context
        # between the user's callback and the IOLoop, so monkey-patch
        # IOLoop.handle_callback_exception and disable the test harness's
        # context with a NullContext.
        # Note that this does not apply to secondary callbacks (header
        # and streaming_callback), as errors there must be seen as errors
        # by the http client so it can clean up the connection.
        exc_info = []

        def handle_callback_exception(callback):
            exc_info.append(sys.exc_info())
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            self.http_client.fetch(self.get_url('/hello'),
                                   lambda response: 1 / 0)
        self.wait()
        self.assertEqual(exc_info[0][0], ZeroDivisionError)

    @gen_test
    def test_future_interface(self):
        response = yield self.http_client.fetch(self.get_url('/hello'))
        self.assertEqual(response.body, b'Hello world!')

    @gen_test
    def test_future_http_error(self):
        # With the Future interface a non-200 raises HTTPError directly.
        try:
            yield self.http_client.fetch(self.get_url('/notfound'))
        except HTTPError as e:
            self.assertEqual(e.code, 404)
            self.assertEqual(e.response.code, 404)

    @gen_test
    def test_reuse_request_from_response(self):
        # The response.request attribute should be an HTTPRequest, not
        # a _RequestProxy.
        # This test uses self.http_client.fetch because self.fetch calls
        # self.get_url on the input unconditionally.
        url = self.get_url('/hello')
        response = yield self.http_client.fetch(url)
        self.assertEqual(response.request.url, url)
        self.assertTrue(isinstance(response.request, HTTPRequest))
        response2 = yield self.http_client.fetch(response.request)
        self.assertEqual(response2.body, b'Hello world!')


class RequestProxyTest(unittest.TestCase):
    """Tests for _RequestProxy's layering of per-request values over
    client-wide defaults."""

    def test_request_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          user_agent='foo'),
                              dict())
        self.assertEqual(proxy.user_agent, 'foo')

    def test_default_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict(network_interface='foo'))
        self.assertEqual(proxy.network_interface, 'foo')

    def test_both_set(self):
        # The request's own value wins over the default.
        proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                          proxy_host='foo'),
                              dict(proxy_host='bar'))
        self.assertEqual(proxy.proxy_host, 'foo')

    def test_neither_set(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        self.assertIs(proxy.auth_username, None)

    def test_bad_attribute(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                              dict())
        with self.assertRaises(AttributeError):
            proxy.foo

    def test_defaults_none(self):
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
        self.assertIs(proxy.auth_username, None)


class HTTPResponseTestCase(unittest.TestCase):
    """Sanity checks for HTTPResponse's string representation."""

    def test_str(self):
        response = HTTPResponse(HTTPRequest('http://example.com'),
                                200, headers={}, buffer=BytesIO())
        s = str(response)
        self.assertTrue(s.startswith('HTTPResponse('))
        self.assertIn('code=200', s)


class SyncHTTPClientTest(unittest.TestCase):
    """Tests the blocking HTTPClient against a real server running on its
    own IOLoop in a background thread."""

    def setUp(self):
        if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop')
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        server = HTTPServer(app, io_loop=self.server_ioloop)
        server.add_socket(sock)

        # The server loop runs in its own thread so the synchronous client
        # (which blocks this thread) can talk to it.
        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        # Stop the loop from within its own thread, then join and clean up.
        self.server_ioloop.add_callback(self.server_ioloop.stop)
        self.server_thread.join()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://localhost:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
# Copyright 2017 Keegan Joseph Brophy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Flask front-end for a teaching-load tracking database built on peewee
# models imported from Core (Person, Activity, Offering, Supervision, ...).
# NOTE(review): this module is Python 2 only — it uses print statements and
# the octal literal `01`; it will not parse under Python 3.
from flask import *
from Core import *

app = Flask(__name__)


@app.route('/test_csv')
def test_csv():
    # Dumps one CSV row per offering taught by each person.
    # NOTE(review): the output file is opened but never closed/flushed, and
    # the filename 'CSV test' is hard-coded with no path.
    prof = Person.select()
    targ = open('CSV test', 'w')
    targ.write('Course#, Title, EE, CoE, # Stud., Instructor, Load, Cnt., Location, Cnt., Sctn., Length, Location, Cnt., Location')
    for i in prof:
        prof_id=i.id
        # NOTE(review): term is hard-coded to year 2008, session 01 (octal).
        term = Semester.select().where(Semester.year == 2008,Semester.session==01)
        list_of_offerings = list()
        person = Person.get(Person.id == prof_id)
        for x in term:
            offering = (Activity
                        .select()
                        .join(Offering)
                        .where(Activity.subject == prof_id,Offering.semester==x.id)
                        .order_by(Offering.semester.desc()))
            for y in offering:
                list_of_offerings.append(y)
        list_offering_id = list()
        for x in list_of_offerings:
            list_offering_id.append(x.offering.id)
        list_offering_id.sort()
        Stotal = 0
        Ptotal = 0
        Ototal = 0
        Snum = (Supervision
                .select()
                .join(Activity)
                .where(Activity.subject == prof_id))
        list_supervision_date = list()
        list_supervision_value = list()
        for num in Snum:
            # One Activity row per supervision; weight * split is the load.
            Ssum = Activity.select().where(Activity.supervision==num.id).get()
            Stotal += num.supervision_class_id.weight*Ssum.split
            list_supervision_date.append(Ssum.supervision.semester.year)
            list_supervision_date.append(Ssum.supervision.semester.session)
            list_supervision_value.append(num.supervision_class_id.weight * Ssum.split)
        list_forviewer = dict()
        list_split = dict()
        offering_value_date = list()
        # NOTE(review): `counter =- 1` parses as `counter = -1`; the spacing
        # suggests `-=` may have been intended — confirm before changing.
        counter =- 1
        for num in list_of_offerings:
            counter += 1
            Osum = Activity.select().where(Activity.offering==num.offering.id).get()
            var1 = weight_calc(Osum.offering.id)
            Ototal += var1*Osum.split
            list_forviewer[Osum.offering.id] = var1
            list_split[Osum.offering.id] = Osum.split
            offering_value_date.append((str(Osum.offering.semester.year)+'0'+str(Osum.offering.semester.session)))
            offering_value_date.append(var1*Osum.split)
        list_project_supervision_date = list()
        list_project_supervision_value = list()
        Pnum = (ProjectSupervision
                .select()
                .join(Activity)
                .where(Activity.subject == prof_id))
        for num in Pnum:
            Psum = Activity.select().where(Activity.project == num.id).get()
            Ptotal += num.project_class_id.weight*Psum.split
            list_project_supervision_date.append(Psum.project.semester.year)
            list_project_supervision_date.append(Psum.project.semester.session)
            list_project_supervision_value.append(num.project_class_id.weight * Psum.split)
        for x in list_of_offerings:
            # Suppress boilerplate other_info values; anything else is kept
            # as the "tut" column.
            if x.offering.generation.other_info=='None' or x.offering.generation.other_info=='36-hour field school conducted during the first two weeks of the semester' or x.offering.generation.other_info=='meetings with project supervisor as required' or x.offering.generation.other_info=='weekly meetings with project supervisor' or x.offering.generation.other_info==None:
                tut=''
            else:
                tut=x.offering.generation.other_info
            targ.write('\n')
            targ.write(str(x.offering.generation.course.code)+','+str(x.offering.generation.title)+','+''+','+''+','+str(x.offering.enrolment)+','+str(person.name)+','+str(weight_calc(x.offering.id))+','+str(x.offering.generation.lecture_hours)+','+''+','+str(x.offering.generation.labs)+','+str(x.offering.sections)+','+''+','+''+','+str(tut)+','+'')
    return redirect('/')


@app.route('/test_csv2')
def test_csv2():
    """ planed test for new csv styles """
    # NOTE(review): `seconf_year` looks like a typo for `second_year` and is
    # never used; output file is never closed.
    first_year=2011
    seconf_year=2011
    targ = open('CSV test2', 'w')
    targ.write('Name,'+str(first_year)+', Base, Load, F'+str(first_year)+', W'+str(first_year+1)+', S'+str(first_year+1)+', Other \n')
    master=Activity.select().join(Person, on=Activity.subject)
    for m in master:
        if m.offering.semester.year==first_year:
            # NOTE(review): `where(instructor=..., PersonalLoad.end.year>=...)`
            # is a SyntaxError (positional arg after keyword arg) and is also
            # invalid peewee usage — .where() takes expression objects such as
            # PersonalLoad.instructor == m.subject.id, not keyword args.
            try:
                deficit2=PersonalLoad.select().where(instructor=m.subject.id,PersonalLoad.end.year>=first_year).order_by(PersonalLoad.end.asc()).get()
            except:
                deficit2=PersonalLoad.select().where(instructor=m.subject.id).order_by(PersonalLoad.end.asc()).get()
            print deficit2.deficit
            Ototal=0
            list_of_offerings=list()
            offering = (Activity
                        .select()
                        .join(Offering)
                        .where(Activity.subject == m.subject, Offering.semester <= first_year)
                        .order_by(Offering.semester.desc()))
            for y in offering:
                list_of_offerings.append(y)
            for num in list_of_offerings:
                var1 = weight_calc(num.offering.id)
                Ototal += var1 * m.split
            defi=deficit_func(m.subject.id, 2008, first_year)
            targ.write(str(m.subject.name)+','+str(defi)+','+str(deficit2.deficit)+','+str(Ototal)+','+'\n')
    return redirect('/')


@app.route('/export', methods=['GET','POST'])
def docustomexport():
    # Exports the table chosen in the form as <name>.csv and serves it.
    if request.method == 'POST':
        selector = request.form.get('Select')
        export_file(selector)
        name = selector+'.csv'
        return send_file(name,mimetype=None,as_attachment=True)
    return render_template('export.html')


@app.route('/import', methods=['GET', 'POST'])
def docustomimport():
    # Imports a previously exported table chosen in the form.
    if request.method == 'POST':
        selector = request.form.get('Select')
        import_file(selector)
    return render_template('import.html')


@app.route('/favicon.ico')
def favicon():
    # noinspection PyUnresolvedReferences
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')


@app.route("/profile/<prof_id>/<year>/<reports>", methods=['GET', 'POST'])
def Profile(prof_id,year,reports,):
    # Per-instructor profile page: totals supervision, project-supervision,
    # offering, and adjustment load, and handles several POST sub-forms.
    # Also called directly from reports() with reports=True to get raw totals.
    # NOTE(review): when hit via the URL, `reports` is a path string, so the
    # later `reports==True` comparison is only True for the internal call.
    term=termselect(year)
    list_of_offerings = list()
    for x in term:
        person = Person.get(Person.id == prof_id)
        supervision = (Supervision
                       .select()
                       .join(Activity)
                       .where(Activity.subject == prof_id)
                       .order_by(Supervision.semester.desc()))
        projectsupervision = (ProjectSupervision
                              .select()
                              .join(Activity)
                              .where(Activity.subject == prof_id)
                              .order_by(ProjectSupervision.semester.desc()))
        offering = (Activity
                    .select()
                    .join(Offering)
                    .where(Activity.subject == prof_id,Offering.semester==x.id)
                    .order_by(Offering.semester.desc()))
        for y in offering:
            list_of_offerings.append(y)
    list_offering_id = list()
    for x in list_of_offerings:
        list_offering_id.append(x.offering.id)
    list_offering_id.sort()
    adjustment = (Adjustment
                  .select()
                  .join(Person)
                  .where(Person.id == prof_id)
                  .order_by(Adjustment.id.desc()))
    Stotal = 0
    Ptotal = 0
    Ototal = 0
    Snum = (Supervision
            .select()
            .join(Activity)
            .where(Activity.subject == prof_id))
    list_supervision_date = list()
    list_supervision_value = list()
    for num in Snum:
        Ssum = Activity.select().where(Activity.supervision==num.id).get()
        Stotal += num.supervision_class_id.weight*Ssum.split
        list_supervision_date.append(Ssum.supervision.semester.year)
        list_supervision_date.append(Ssum.supervision.semester.session)
        list_supervision_value.append(num.supervision_class_id.weight * Ssum.split)
    list_forviewer = dict()
    list_split = dict()
    offering_value_date = list()
    # NOTE(review): same `=- 1` spacing as test_csv; parses as `= -1`.
    counter =- 1
    for num in list_of_offerings:
        counter += 1
        Osum = Activity.select().where(Activity.offering==num.offering.id).get()
        var1 = weight_calc(Osum.offering.id)
        Ototal += var1*Osum.split
        list_forviewer[Osum.offering.id] = var1
        list_split[Osum.offering.id] = Osum.split
        offering_value_date.append((str(Osum.offering.semester.year)+'0'+str(Osum.offering.semester.session)))
        offering_value_date.append(var1*Osum.split)
    list_project_supervision_date = list()
    list_project_supervision_value = list()
    Pnum = (ProjectSupervision
            .select()
            .join(Activity)
            .where(Activity.subject == prof_id))
    for num in Pnum:
        Psum = Activity.select().where(Activity.project == num.id).get()
        Ptotal += num.project_class_id.weight*Psum.split
        list_project_supervision_date.append(Psum.project.semester.year)
        list_project_supervision_date.append(Psum.project.semester.session)
        list_project_supervision_value.append(num.project_class_id.weight * Psum.split)
    # Sum of all adjustment weights for this person (may be None if none).
    Atotal = (Person
              .select()
              .where(Person.id == prof_id)
              .join(Adjustment)
              .select(fn.SUM(Adjustment.weight))
              .scalar())
    term2=Semester.select().order_by(Semester.year.asc()).get()
    defi = deficit_func(prof_id,None,2016)
    # TODO: fix cumulative load calc.
    deficit2=PersonalLoad.select().join(Person).where(Person.id==prof_id)
    if Ototal is None:
        Ototal = 0
    if Atotal is None:
        Atotal = 0
    if Stotal is None:
        Stotal = 0
    if Ptotal is None:
        Ptotal = 0
    total = Ptotal + Atotal + Stotal + Ototal - defi
    if request.method == 'POST':
        if request.form['subm1'] == "Supervisions CSV":
            anyplot(list_supervision_date, 'super for id '+str(prof_id), list_supervision_value)
            return send_file('super for id '+str(prof_id)+'.pdf')
        if request.form['subm1'] == "Project Supervisions CSV":
            anyplot(list_project_supervision_date, 'project for id '+str(prof_id), list_project_supervision_value)
            return send_file('project for id '+str(prof_id)+'.pdf')
        if request.form['subm1'] == "Offerings CSV":
            print 'asd'
            offerplot(offering_value_date, 'offer for id '+str(prof_id))
            return send_file('offer for id '+str(prof_id)+'.pdf')
        if request.form['subm1'] == "update":
            name = request.form['name']
            email = request.form['email']
            start = request.form['start']
            varyear= Semester.select().where(Semester.year==start,Semester.session==01).get()
            A=Person.update(name=name,email=email,start=varyear.id).where(Person.id==prof_id)
            A.execute()
            Adjustment.create(comment=("Person table update, name + "+str(name)+" + email + "+str(email)+" + start + "+str(start)+" +"), instructor=prof_id)
        if request.form['subm1'] == "adjustment":
            weight = request.form['weight']
            comment = request.form['comment']
            Adjustment.create(weight=weight, comment=comment, instructor=prof_id)
        # if request.form['subm1'] == "deficit":
        #     defi2=Deficit.select()
        #     for x in defi2:
        #         if 'applied_start'+str(x.id) in request.form:
        #             if request.form['applied_start'+str(x.id)]!='':
        #                 a = Deficit.update(applied_start=int(request.form['applied_start'+str(x.id)])).where(Deficit.id == x.id)
        #                 a.execute()
        #         if 'applied_final'+str(x.id) in request.form:
        #             if request.form['applied_start'+str(x.id)]!='':
        #                 b = Deficit.update(applied_final=int(request.form['applied_final'+str(x.id)])).where(Deficit.id == x.id)
        #                 b.execute()
        #         if 'deficit'+str(x.id) in request.form:
        #             if request.form['applied_start'+str(x.id)]!='':
        #                 c = Deficit.update(deficit=float(request.form['deficit'+str(x.id)])).where(Deficit.id == x.id)
        #                 c.execute()
        #     deficit3 = request.form['deficit3']
        #     applied_start = request.form['applied_start']
        #     # var = Deficit.select().where(Deficit.applied==prof_id).order_by(Deficit.applied_start.desc()).get()
        #     if deficit3=="":
        #         deficit3=4.0
        #     applied_start=int(applied_start)
        #     var2 = int(var.applied_start)
        #     if var2>=applied_start:
        #         return 'error'
        #     A=Deficit.update(applied_final=applied_start).where(Deficit.applied==prof_id,Deficit.applied_final==None)
        #     A.execute()
        #     Deficit.create(deficit=deficit3,applied=prof_id, applied_start=applied_start)
        #     Adjustment.create(comment=("Deficit table update, applied_start"+str(applied_start)+" deficit"+str(deficit3)), applied=prof_id)
        if request.form['subm1'] == "offering":
            off=Offering.select()
            for id in off:
                print request.form['applied_start'+str(id.id)]
                if 'enroll' + str(id.id) in request.form:
                    l = Offering.update(enrolment=(int(request.form['applied_start'+str(id.id)]))).where(Offering.id == id.id)
                    l.execute()
                    Adjustment.create(comment=('enrolment in + '+str(id.id)+' + offering to become + '+str(int(request.form['applied_start'+str(id.id)]))+' +'))
    if reports==True:
        return total, defi, offering_value_date, list_project_supervision_date, list_project_supervision_value, list_supervision_value, list_supervision_date
    return render_template("profilehist.html", person=person, supervision=supervision,instructor=prof_id, projectsupervision=projectsupervision, offering=list_of_offerings, adjustment=adjustment,total=total, Stotal=Stotal, Ptotal=Ptotal, Ototal=Ototal, deficit=defi, list_forviewer=list_forviewer,list_split=list_split ,year=year, reports=reports, Deficit=deficit2)


@app.route('/listm', methods=['GET', 'POST'])
def listm():
    # Master-list admin page: creates CourseGenerations (submit1), Activities
    # (submit3), Offerings (submit2), Semesters (submit4), updates offering
    # info, and marks rows reviewed. Every submit branch swallows all errors
    # with a bare except.
    # NOTE(review): the bare `except: pass` blocks hide real failures; the
    # builtin name `id` is shadowed by loop variables below.
    if request.method == 'POST':
        if request.form['subm1'] == "submit1":
            try:
                labs = request.form['labs']
                credit_hours = request.form['credit_hours']
                lecture_hours = request.form['lecture_hours']
                title = request.form['title']
                comments = request.form['comments']
                course = int(request.form['course'])
                A=Course.get_or_create(code=course,subject='ENGI')
                other_info = request.form['other_info']
                previous_course = request.form['previous_course']
                start_year = request.form['start_year']
                end_year = request.form['end_year']
                if end_year != '' and start_year != '' and labs != '' and credit_hours != '' and lecture_hours != '' and title != '' and course != '':
                    A=CourseGeneration.create(labs=labs, credit_hours=credit_hours, lecture_hours=lecture_hours, title=title, comments=comments, other_info=other_info, previous_course=previous_course, start_year=start_year, end_year=end_year, course=A[0].id, reviewed=True)
                    B = CourseGeneration.select().where(CourseGeneration.labs == labs, CourseGeneration.credit_hours == credit_hours, CourseGeneration.lecture_hours == lecture_hours, CourseGeneration.title == title, CourseGeneration.comments == comments, CourseGeneration.other_info == other_info, CourseGeneration.previous_course == previous_course, CourseGeneration.start_year == start_year, CourseGeneration.end_year == end_year, CourseGeneration.course == A[0].id, CourseGeneration.reviewed==True).get()
                    Adjustment.create(comment='Created a course generation ' + str(B.id), overide_address='CourseGeneration.' + str(B.id))
            except:
                pass
        if request.form['subm1'] == "submit3":
            try:
                instructor = request.form['instructor']
                offering = request.form['offering']
                supervision = request.form['supervision']
                project = request.form['pid']
                role = request.form['rid']
                split = request.form['split']
                if instructor=='':
                    error='instructor is none'
                if supervision=='':
                    supervision=None
                if offering=='':
                    offering=None
                if project=='':
                    project=None
                if role=='':
                    role=None
                if split=='':
                    split = 1
                Activity.create(subject=instructor, offering=offering, supervision=supervision,project=project, role=role, split=split)
                B=Activity.select().where(Activity.subject==instructor, Activity.offering==offering, Activity.supervision==supervision,Activity.project==project, Activity.role==role, Activity.split==split).get()
                Adjustment.create(comment='Created Teaching paring ' + str(B.id), overide_address='Activity.' + str(B.id))
            except:
                pass
        if request.form['subm1'] == "submit2":
            try:
                enrolment = request.form['enrolment']
                semester = request.form['semester']
                generation = request.form['generation']
                sections = request.form['sections']
                Offering.create(enrolment=enrolment, semester=semester, generation=generation,sections=sections,reviewed=True)
                B=Offering.select().where(Offering.enrolment==enrolment, Offering.semester==semester, Offering.generation==generation,Offering.sections==sections,Offering.reviewed==True).get()
                Adjustment.create(comment='Created Offering ' + str(B.id), overide_address='Offering.' + str(B.id))
            except:
                pass
        if request.form['subm1'] == "submit4":
            try:
                year = request.form['year']
                session = request.form['session']
                Semester.create(year=year,session=session)
                B=Semester.select().where(Semester.year==year,Semester.session==session)
                Adjustment.create(comment='Created Semester ' + str(B.id), overide_address='Semester.' + str(B.id))
            except:
                pass
        if request.form['subm1'] == "update info":
            enrol = request.form['enroll']
            ooid = request.form['offering']
            sections = request.form['sections']
            sections = int(sections)
            enrol = int(enrol)
            ooid = int(ooid)
            A=Offering.update(enrolment=enrol, sections=sections).where(Offering.id==ooid)
            A.execute()
            print 'i updated?'
            Adjustment.create(comment=('enrolment in + '+str(ooid)+' + offering to become + '+str(enrol)+' +'))
        if request.form['subm1'] == "reviewed":
            person=Person.select()
            for id in person:
                # print request.form['name'+str(id.id)] != ""
                # if request.form['name'+str(id.id)] != "":
                #     name = request.form['name']
                #     email = request.form['email']
                #     id = request.form['professrid']
                #     print id
                #     print email
                #     print name
                #     A = Person.update(name=name, email=email).where(Person.id == id.id)
                #     A.execute()
                #     Adjustment.create(comment=(
                #         "Person table update, name + " + str(name) + " + email + " + str(email) + " + start + "),
                #         instructor=id, reviewed=True)
                if 'cbox1'+str(id.id) in request.form:
                    a = Person.update(reviewed=True).where(Person.id == id.id)
                    a.execute()
                else:
                    pass
            # NOTE(review): the three loops below test checkboxes keyed on
            # `id.id` (the last Person) instead of `x.id` — presumably a bug;
            # confirm against masterlist.html before changing.
            course = Course.select()
            for x in course:
                if 'cbox2' + str(id.id) in request.form:
                    a = Course.update(reviewed=True).where(Course.id == x.id)
                    a.execute()
                else:
                    pass
            coursegen = CourseGeneration.select()
            for x in coursegen:
                if 'cbox3' + str(id.id) in request.form:
                    a = CourseGeneration.update(reviewed=True).where(CourseGeneration.id == x.id)
                    a.execute()
                else:
                    pass
            offering = Offering.select()
            for x in offering:
                if 'cbox4' + str(id.id) in request.form:
                    a = Offering.update(reviewed=True).where(Offering.id == x.id)
                    a.execute()
                else:
                    pass
        # if request.form['subm1'] == "PURGE1":
        #     purge=request.form['purgeid1']
        #     print purge
        #     a=Person.delete().where(Person.id==purge)
        #     a.execute()
        # if request.form['subm1'] == "PURGE2":
        #     purge=request.form['purgeid2']
        #     a=Course.delete().where(Course.id==purge)
        #     a.execute()
        # if request.form['subm1'] == "PURGE3":
        #     purge=request.form['purgeid3']
        #     a=Offering.delete().where(Offering.id==purge)
        #     a.execute()
        # if request.form['subm1'] == "PURGE4":
        #     purge=request.form['purgeid4']
        #     a=CourseGeneration.delete().where(CourseGeneration.id==purge)
        #     a.execute()
    mastermany = Activity.select().order_by(Activity.offering.asc())
    return render_template("masterlist.html", Person=Person, ProjectType=ProjectType, Course=Course, SupervisionClass=SupervisionClass, ProjectClass=ProjectClass, ProjectSupervision=ProjectSupervision, Supervision=Supervision, Adjustment=Adjustment, Role=Role, Semester=Semester, Offering=Offering, CourseGeneration=CourseGeneration, Student=Student,Activity=mastermany)


@app.route('/yearly/<year>/', methods=['GET', 'POST'])
def reports(year):
    # Yearly report: for each semester in `year`, finds every instructor with
    # an offering and writes their totals (from Profile(..., reports=True))
    # to a scratch file, then plots the deficits.
    # NOTE(review): output file 'asd' is never closed; shadowing the local
    # name `reports` over the function's own name is confusing but harmless.
    list_total = list()
    reports=True
    term=termselect(year)
    targ = open('asd', 'w')
    if term!=True:
        for x in term:
            var1=str(x.year)
            list_of_teachers1 = list()
            master=Activity.select().join(Offering).where(Activity.offering==Offering.id,Offering.semester==x.id)
            for y in master:
                list_of_teachers1.append(y.subject)
            # Deduplicate instructors before computing per-person totals.
            list_of_teachers2=set(list_of_teachers1)
            for z in list_of_teachers2:
                total, defi, offering_value_date, list_project_supervision_date, list_project_supervision_value, list_supervision_value, list_supervision_date = Profile(z.id, var1, reports)
                if x.session==1:
                    targ.write('made to date ' + str(defi + total))
                    targ.write('\n')
                    targ.write('total deficit ' + str(total))
                    targ.write('\n')
                    targ.write(str(z.id) + ' ' + str(z.name))
                    targ.write('\n')
                    targ.write(str(x.year) + str(x.session))
                    targ.write('\n')
                    targ.write('\n')
                    targ.write('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
                    targ.write('\n')
                    list_total.append(str(x.year)+'0'+str(x.session))
                    list_total.append(total)
    offerplot(list_total,'Deficit','offer')
    return redirect('/')
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Inception Resnet v2 Faster R-CNN implementation.

See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261)
as well as
"Speed/accuracy trade-offs for modern convolutional object detectors" by
Huang et al. (https://arxiv.org/abs/1611.10012)
"""

import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim

from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.utils import variables_helper
from nets import inception_resnet_v2

slim = contrib_slim


class FasterRCNNInceptionResnetV2FeatureExtractor(
    faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
  """Faster R-CNN with Inception Resnet v2 feature extractor implementation."""

  def __init__(self,
               is_training,
               first_stage_features_stride,
               batch_norm_trainable=False,
               reuse_weights=None,
               weight_decay=0.0):
    """Constructor.

    Args:
      is_training: See base class.
      first_stage_features_stride: See base class.
      batch_norm_trainable: See base class.
      reuse_weights: See base class.
      weight_decay: See base class.

    Raises:
      ValueError: If `first_stage_features_stride` is not 8 or 16.
    """
    # Only strides 8 and 16 are supported by the aligned Inception ResNet
    # v2 base network construction used below.
    if first_stage_features_stride != 8 and first_stage_features_stride != 16:
      raise ValueError('`first_stage_features_stride` must be 8 or 16.')
    super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__(
        is_training, first_stage_features_stride, batch_norm_trainable,
        reuse_weights, weight_decay)

  def preprocess(self, resized_inputs):
    """Faster R-CNN with Inception Resnet v2 preprocessing.

    Maps pixel values to the range [-1, 1].

    Args:
      resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
        representing a batch of images with values between 0 and 255.0.

    Returns:
      preprocessed_inputs: A [batch, height_out, width_out, channels] float32
        tensor representing a batch of images.

    """
    # Linear map [0, 255] -> [-1, 1].
    return (2.0 / 255.0) * resized_inputs - 1.0

  def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    Extracts features using the first half of the Inception Resnet v2 network.
    We construct the network in `align_feature_maps=True` mode, which means
    that all VALID paddings in the network are changed to SAME padding so that
    the feature maps are aligned.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
    Raises:
      InvalidArgumentError: If the spatial size of `preprocessed_inputs`
        (height or width) is less than 33.
      ValueError: If the created network is missing the required activation.
    """
    if len(preprocessed_inputs.get_shape().as_list()) != 4:
      raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
                       'tensor of shape %s' % preprocessed_inputs.get_shape())

    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(
        weight_decay=self._weight_decay)):
      # Forces is_training to False to disable batch norm update.
      with slim.arg_scope([slim.batch_norm],
                          is_training=self._train_batch_norm):
        with tf.variable_scope('InceptionResnetV2',
                               reuse=self._reuse_weights) as scope:
          # Stop at 'PreAuxLogits': everything after that endpoint belongs to
          # the second-stage box classifier (see method below).
          return inception_resnet_v2.inception_resnet_v2_base(
              preprocessed_inputs, final_endpoint='PreAuxLogits',
              scope=scope, output_stride=self._first_stage_features_stride,
              align_feature_maps=True)

  def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    """Extracts second stage box classifier features.

    This function reconstructs the "second half" of the Inception ResNet v2
    network after the part defined in `_extract_proposal_features`.

    Args:
      proposal_feature_maps: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.
      scope: A scope name.

    Returns:
      proposal_classifier_features: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, height, width, depth]
        representing box classifier features for each proposal.
    """
    with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights):
      with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(
          weight_decay=self._weight_decay)):
        # Forces is_training to False to disable batch norm update.
        with slim.arg_scope([slim.batch_norm],
                            is_training=self._train_batch_norm):
          with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                              stride=1, padding='SAME'):
            # Hand-built Mixed_7a block (four parallel branches concatenated
            # on the channel axis), matching the tail of Inception ResNet v2.
            with tf.variable_scope('Mixed_7a'):
              with tf.variable_scope('Branch_0'):
                tower_conv = slim.conv2d(proposal_feature_maps,
                                         256, 1, scope='Conv2d_0a_1x1')
                tower_conv_1 = slim.conv2d(
                    tower_conv, 384, 3, stride=2,
                    padding='VALID', scope='Conv2d_1a_3x3')
              with tf.variable_scope('Branch_1'):
                tower_conv1 = slim.conv2d(
                    proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
                tower_conv1_1 = slim.conv2d(
                    tower_conv1, 288, 3, stride=2,
                    padding='VALID', scope='Conv2d_1a_3x3')
              with tf.variable_scope('Branch_2'):
                tower_conv2 = slim.conv2d(
                    proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
                tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
                                            scope='Conv2d_0b_3x3')
                tower_conv2_2 = slim.conv2d(
                    tower_conv2_1, 320, 3, stride=2,
                    padding='VALID', scope='Conv2d_1a_3x3')
              with tf.variable_scope('Branch_3'):
                tower_pool = slim.max_pool2d(
                    proposal_feature_maps, 3, stride=2, padding='VALID',
                    scope='MaxPool_1a_3x3')
              net = tf.concat(
                  [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
            # Nine block8 (Inception-ResNet-C) units, then a final linear
            # block8 and the 1536-channel projection.
            net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20)
            net = inception_resnet_v2.block8(net, activation_fn=None)
            proposal_classifier_features = slim.conv2d(
                net, 1536, 1, scope='Conv2d_7b_1x1')
            return proposal_classifier_features

  def restore_from_classification_checkpoint_fn(
      self,
      first_stage_feature_extractor_scope,
      second_stage_feature_extractor_scope):
    """Returns a map of variables to load from a foreign checkpoint.

    Note that this overrides the default implementation in
    faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for
    InceptionResnetV2 checkpoints.

    TODO(jonathanhuang,rathodv): revisit whether it's possible to force the
    `Repeat` namescope as created in `_extract_box_classifier_features` to
    start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can
    be used.
Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2') var_name = var_name.replace( second_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable return variables_to_restore
"""plugins template tag root hooks.
"""
from django.conf import settings
from django.db.models.loading import get_app
from django import template
from inspect import getargspec
from django.template.context import Context
from django.utils.functional import curry
from django.utils.encoding import smart_str
from django.template import loader, VariableDoesNotExist, Variable, Node
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.core.cache.backends.locmem import CacheClass as LocalMemCache

# Separator for keyword arguments in template tags: {% tag key=value %}.
TAG_KEYWORD_ARGUMENT_SEPARATOR = '='

## RED_FLAG: this feels wrong...
#from app_plugins.models import PluginPoint, Plugin
#from app_plugins.models import is_valid_label, construct_template_path
models = get_app('app_plugins')

APP_PLUGINS_CACHE_PARAMS = getattr(settings, 'APP_PLUGINS_CACHE_PARAMS',
                                   {'cull_frequency': 4,
                                    'max_entries': 3000,
                                    'timeout': 60*60*24*3,  # 3 days
                                   })

# Per-process cache mapping "<point name><ext>" -> list of app names that
# ship a template for that plugin point.
app_plugin_apps_with_templates = LocalMemCache('localhost',
                                               APP_PLUGINS_CACHE_PARAMS)

# At import time, cache the de-duplicated trailing app names for indexing.
app_names = []
for app in settings.INSTALLED_APPS:
    name = app.split('.')[-1]
    if name not in app_names:
        app_names.append(name)
app_names = tuple(app_names)


def callback(func, variables, context, takes_context):
    """Resolve an iterable of Variable objects into positional args and a
    dict of keyword arguments, then call ``func`` with them.

    Supports full python style keyword argument processing::

        >>> def foo(a, b, c=1, d=2):
        ...     pass
        >>> foo(1, 2)
        >>> foo(1, b=2)
        >>> foo(b=2, a=1, d=3)

    Raises TemplateSyntaxError for malformed, unexpected, duplicate or
    missing arguments, mirroring Python's own calling-convention errors.
    """
    name = getattr(func, "_decorated_function", func).__name__
    params, varargs, varkw, defaults = getargspec(func)
    if takes_context:
        if params[0] == 'context':
            params.pop(0)
        else:
            raise TemplateSyntaxError(
                "Any tag function decorated with takes_context=True "
                "must have a first argument of 'context'")
    # FIX: getargspec returns None (not ()) when there are no defaults;
    # the previous len(defaults) raised TypeError for such functions.
    num_defaults = len(defaults) if defaults else 0
    num_params = len(params)
    num_req = num_params - num_defaults
    args = []
    kwdargs = {}
    found_kwd = False
    for variable in variables:
        if not found_kwd:
            try:
                args.append(variable.resolve(context))
            except VariableDoesNotExist:
                # An unresolvable token containing exactly one '=' marks the
                # switch from positional to keyword arguments.
                if variable.var.count(TAG_KEYWORD_ARGUMENT_SEPARATOR) != 1:
                    raise
                found_kwd = True
        if found_kwd:
            try:
                var, path = variable.var.split(TAG_KEYWORD_ARGUMENT_SEPARATOR)
            except ValueError:
                raise TemplateSyntaxError(
                    "Expected keyword assignment, found '%s' instead"
                    % variable.var)
            # FIX: validate the keyword name itself; the original tested
            # ``name not in params`` (the tag function's name), which let
            # bogus keywords through and rejected valid calls.
            if params and not varkw and var not in params:
                raise TemplateSyntaxError(
                    "%s got an unexpected keyword argument '%s'"
                    % (name, var))
            if var in kwdargs:
                raise TemplateSyntaxError(
                    "%s got multiple values for keyword argument '%s'"
                    % (name, var))
            kwdargs[smart_str(var)] = Variable(path).resolve(context)
    num_args = len(args)
    num_kwds = len(kwdargs)
    num_all = num_args + num_kwds
    if ((num_args > num_params and not varargs) or
            (num_all > num_params and not varkw)):
        raise TemplateSyntaxError(
            "%s takes at most %s arguments. (%s given)" % (
                name, num_params, num_all))
    if num_args != num_req:
        if num_args > num_req:
            # some args are kwd args (maybe multiple keyword error)
            if not varargs:
                allowed = set(params[num_args:])
                not_allowed = set(kwdargs) - allowed
                if not_allowed:
                    raise TemplateSyntaxError(
                        "%s got multiple values for keyword arguments: %s" % (
                            name, ", ".join(not_allowed)))
        elif not varkw:
            # Not enough required parameters supplied positionally: the ones
            # in params[num_args:num_req] must arrive as keywords.
            # FIX: was ``params[num_args:-num_default]`` -- an undefined-name
            # typo (num_default) that also sliced wrongly with zero defaults.
            required = set(params[num_args:num_req])
            missing = required - set(kwdargs)
            if missing:
                raise TemplateSyntaxError(
                    "%s takes at least %s non-keyword arguments (%s given)" % (
                        name, num_req, num_args))
    if takes_context:
        args.insert(0, context)
    return func(*args, **kwdargs)


def compiler(node_class, parser, token):
    """Generic template-tag compile function: hand the raw bits (everything
    after the tag name) to ``node_class``."""
    bits = token.split_contents()[1:]
    return node_class(bits)


def inclusion_kwdtag(register, file_name, context_class=Context,
                     takes_context=False):
    """Like ``Library.inclusion_tag`` but with full keyword-argument support
    via :func:`callback`.  The decorated function returns a context dict that
    is rendered with ``file_name``.
    """
    def dec(func):
        class InclusionKwdNode(Node):
            def __init__(self, vars_to_resolve):
                self.vars_to_resolve = map(Variable, vars_to_resolve)

            def render(self, context):
                new_context = callback(func, self.vars_to_resolve, context,
                                       takes_context)
                # Lazily load and cache the template nodelist on first render.
                if not getattr(self, 'nodelist', False):
                    # NOTE(review): ``is_iterable`` is not defined or imported
                    # in this module -- presumably expected from a star import
                    # elsewhere; verify before relying on the select_template
                    # branch.
                    if (not isinstance(file_name, basestring) and
                            is_iterable(file_name)):
                        t = loader.select_template(file_name)
                    else:
                        t = loader.get_template(file_name)
                    self.nodelist = t.nodelist
                res = self.nodelist.render(
                    context_class(new_context,
                                  autoescape=context.autoescape))
                # Undo the pushes performed by the tag functions below.
                context.pop()  # local context
                context.pop()  # args
                context.pop()  # callback context (or empty)
                return res

        compile_func = curry(compiler, InclusionKwdNode)
        compile_func.__doc__ = func.__doc__
        register.tag(getattr(func, "_decorated_function", func).__name__,
                     compile_func)
        return func
    return dec


register = template.Library()


@register.filter
def template_exists(templ):
    """Filter: True iff ``templ`` names a template the loaders can find."""
    if templ is None:
        return False
    try:
        #loader.get_template(templ)
        loader.find_template_source(templ)
    except TemplateDoesNotExist:
        return False
    return True


def validate_name(name):
    """Raise TemplateSyntaxError unless ``name`` is a valid plugin label."""
    ## red_flag: turn into a string
    if not models.is_valid_label(name):
        # FIX: modern raise syntax (``raise E, msg`` is Python-2-only).
        raise TemplateSyntaxError("invalid plugin point name '%s'." % name)


@inclusion_kwdtag(register, "app_plugins/app_plugin.html",
                  takes_context=True)
def app_plugin(context, app, name, plugin=None, user=None, args=None,
               ext='.html', **extra_args):
    """Render a single app's plugin for the point ``app.name``."""
    validate_name(app)
    validate_name(name)
    if plugin is None:
        try:
            plugin = models.Plugin.objects.get(label=u'.'.join([app, name]))
        except models.Plugin.DoesNotExist:
            pass
    nc = context
    if args is None:
        args = extra_args
    else:
        args.update(extra_args)
    template = ''
    if plugin is None:
        # No database record: fall back to the conventional template path.
        template = models.construct_template_path(app, name, ext)
        nc.push()
    else:
        nc.update(plugin.call(nc, user, **args))
        template = plugin.template
    nc.update(args)
    nc.push()
    nc['app_plugin'] = plugin
    nc['app_plugin_app'] = app
    nc['app_plugin_point'] = name
    nc['app_plugin_args'] = args
    nc['app_plugin_user'] = user
    nc['app_plugin_template'] = template
    return nc


@inclusion_kwdtag(register, "app_plugins/plugin_point.html",
                  takes_context=True)
def plugin_point(context, name, point=None, user=None, ext='.html', **args):
    """Render every installed app's plugin for the point ``name``."""
    validate_name(name)
    if point is None:
        try:
            point = models.PluginPoint.objects.select_related().get(
                label=name)
        except models.PluginPoint.DoesNotExist:
            pass
    nc = context
    plugins = None
    if point is None:
        # No database record: probe the filesystem (cached per name+ext).
        apps = app_plugin_apps_with_templates.get(name+ext, None)
        if apps is None:
            tpls = ((app, models.construct_template_path(app, name, ext))
                    for app in app_names)
            apps = [app for app, tpl in tpls if template_exists(tpl)]
            app_plugin_apps_with_templates.set(name+ext, apps)
        nc.push()
    else:
        nc.update(point.call(nc, user, **args))
        plugins = point.get_plugins(user)
        apps = [p.app for p in plugins]
    nc.update(args)
    nc.push()
    nc['app_plugin_ext'] = ext
    nc['app_plugin_point'] = name
    nc['app_plugin_apps'] = apps
    nc['app_plugin_args'] = args
    nc['app_plugin_user'] = user
    nc['app_plugin_plugins'] = plugins
    return nc
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops


class _PerDeviceGenerator(dataset_ops.DatasetV2):
  """A `dummy` generator dataset.

  Wraps one shard of a MultiDeviceIterator as a generator-style dataset whose
  init/next/finalize functions run on `source_device` via `remote_call`, so
  that the consuming device can pull elements across devices.
  """

  def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
               source_device, element_structure):
    self._structure = element_structure

    # The string handle lets the remotely-called functions rebind to the
    # iterator resource living on the source device.
    multi_device_iterator_string_handle = (
        gen_dataset_ops.multi_device_iterator_to_string_handle(
            multi_device_iterator_resource))

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(autograph=False)  # Pure graph code.
    def _init_func():
      return multi_device_iterator_string_handle

    init_func_concrete = _init_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(autograph=False)  # Pure graph code.
    def _remote_init_func():
      return functional_ops.remote_call(
          target=source_device,
          args=init_func_concrete.captured_inputs,
          Tout=[dtypes.string],
          f=init_func_concrete)

    self._init_func = _remote_init_func._get_concrete_function_internal()  # pylint: disable=protected-access
    self._init_captured_args = self._init_func.captured_inputs

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _next_func(string_handle):
      # pylint: disable=protected-access
      multi_device_iterator = (
          gen_dataset_ops.multi_device_iterator_from_string_handle(
              string_handle=string_handle,
              output_types=self._structure._flat_types,
              output_shapes=self._structure._flat_shapes))
      return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
          multi_device_iterator=multi_device_iterator,
          shard_num=shard_num,
          incarnation_id=incarnation_id,
          output_types=self._structure._flat_types,
          output_shapes=self._structure._flat_shapes)

    next_func_concrete = _next_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun_with_attributes(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        attributes={"experimental_ints_on_device": True},
        autograph=False)  # Pure graph code.
    def _remote_next_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + next_func_concrete.captured_inputs,
          Tout=self._structure._flat_types,  # pylint: disable=protected-access
          f=next_func_concrete)

    self._next_func = _remote_next_func._get_concrete_function_internal()  # pylint: disable=protected-access
    self._next_captured_args = self._next_func.captured_inputs

    # Remember where `incarnation_id` sits in the captured args so that
    # _ReincarnatedPerDeviceGenerator can splice in a fresh id later.
    self._incarnation_id_index = -1
    for i, arg in enumerate(self._next_captured_args):
      if arg == incarnation_id:
        self._incarnation_id_index = i

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _finalize_func(unused_string_handle):
      return array_ops.constant(0, dtypes.int64)

    finalize_func_concrete = _finalize_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _remote_finalize_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + finalize_func_concrete.captured_inputs,
          Tout=[dtypes.int64],
          f=finalize_func_concrete)

    self._finalize_func = _remote_finalize_func._get_concrete_function_internal(  # pylint: disable=protected-access
    )
    self._finalize_captured_args = self._finalize_func.captured_inputs

    variant_tensor = gen_dataset_ops.generator_dataset(
        self._init_captured_args,
        self._next_captured_args,
        self._finalize_captured_args,
        init_func=self._init_func,
        next_func=self._next_func,
        finalize_func=self._finalize_func,
        **dataset_ops.flat_structure(self))
    super(_PerDeviceGenerator, self).__init__(variant_tensor)

  def _inputs(self):
    # TODO(b/116506223): Determine which datasets should be used as inputs here.
    return []

  @property
  def _element_structure(self):
    return self._structure


class _ReincarnatedPerDeviceGenerator(dataset_ops.DatasetV2):
  """Creates a _PerDeviceGenerator-like dataset with a new incarnation_id.

  Re-uses the functions from the provided per_device_dataset and just switches
  out the function argument corresponding to the incarnation_id.
  """

  def __init__(self, per_device_dataset, incarnation_id):
    # pylint: disable=protected-access
    self._structure = per_device_dataset._structure

    self._init_func = per_device_dataset._init_func
    self._init_captured_args = self._init_func.captured_inputs

    self._next_func = per_device_dataset._next_func
    self._next_captured_args = per_device_dataset._next_captured_args
    # The captured arguments to the next_func are string_handle, incarnation_id.
    # We update the incarnation id to the new one.
    self._next_captured_args[
        per_device_dataset._incarnation_id_index] = incarnation_id

    self._finalize_func = per_device_dataset._finalize_func
    self._finalize_captured_args = per_device_dataset._finalize_captured_args

    variant_tensor = gen_dataset_ops.generator_dataset(
        self._init_captured_args,
        self._next_captured_args,
        self._finalize_captured_args,
        init_func=self._init_func,
        next_func=self._next_func,
        finalize_func=self._finalize_func,
        **dataset_ops.flat_structure(self))
    super(_ReincarnatedPerDeviceGenerator, self).__init__(variant_tensor)

  def _inputs(self):
    # TODO(b/116506223): Determine which datasets should be used as inputs here.
    return []

  @property
  def _element_structure(self):
    return self._structure


class MultiDeviceIterator(object):
  """An iterator over multiple devices."""

  def __init__(self,
               dataset,
               devices,
               max_buffer_size=1,
               prefetch_buffer_size=1,
               source_device="/cpu:0"):
    """Constructs a MultiDeviceIterator.

    Args:
      dataset: The input dataset to be iterated over.
      devices: The list of devices to fetch data to.
      max_buffer_size: Maximum size of the host side per device buffer to keep.
      prefetch_buffer_size: if > 1, then we setup a buffer on each device
        to prefetch into.
      source_device: The host device to place the `dataset` on.

      In order to prevent deadlocks, if the prefetch_buffer_size is greater
      than the max_buffer_size, we set the max_buffer_size to
      prefetch_buffer_size.

    Raises:
      RuntimeError: If run in Eager mode.
    """
    self._dataset = dataset._apply_options()  # pylint: disable=protected-access
    self._devices = devices
    self._source_device = source_device
    self._source_device_tensor = ops.convert_to_tensor(source_device)

    self._max_buffer_size = max_buffer_size
    self._prefetch_buffer_size = prefetch_buffer_size

    # Keep max_buffer_size >= prefetch_buffer_size to avoid deadlock (see
    # the docstring above).
    if self._prefetch_buffer_size > self._max_buffer_size:
      self._max_buffer_size = self._prefetch_buffer_size

    # Create the MultiDeviceIterator.
    with ops.device(self._source_device):
      # TODO(b/121378567): Get rid of this shared_name hack.
      shared_name = ""
      if context.executing_eagerly():
        shared_name = context.shared_name()
      self._multi_device_iterator_resource = (
          gen_dataset_ops.multi_device_iterator(
              devices=self._devices,
              shared_name=shared_name,
              container="",
              **dataset_ops.flat_structure(self._dataset)))
      if context.executing_eagerly():
        # Delete the resource when this object is deleted
        self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
            handle=self._multi_device_iterator_resource,
            handle_device=self._source_device)

      # The incarnation ID is used to ensure consistency between the
      # per-device iterators and the multi-device iterator.
      self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
          self._dataset._variant_tensor,  # pylint: disable=protected-access
          self._multi_device_iterator_resource,
          max_buffer_size=self._max_buffer_size)

    self._prototype_device_datasets = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = _PerDeviceGenerator(
            i, self._multi_device_iterator_resource, self._incarnation_id,
            self._source_device_tensor, self._dataset._element_structure)  # pylint: disable=protected-access
        self._prototype_device_datasets.append(ds)

    # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
    # initialize the device side of the pipeline. This would allow the
    # MultiDeviceIterator to choose, for example, to move some transformations
    # into the device side from its input. It might be useful in rewriting.
    # Create the per device iterators.
    self._device_iterators = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = self._create_device_dataset(i)
        if context.executing_eagerly():
          self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))
        else:
          self._device_iterators.append(
              dataset_ops.make_initializable_iterator(ds))

    if not context.executing_eagerly():
      device_iterator_initializers = [
          iterator.initializer for iterator in self._device_iterators
      ]
      self._initializer = control_flow_ops.group(*device_iterator_initializers)

  def _create_device_dataset(self, i):
    """Uses _prototype_device_datasets[i] to build a dataset for the device."""
    ds = self._prototype_device_datasets[i]
    ds = _ReincarnatedPerDeviceGenerator(ds, self._incarnation_id)
    if self._prefetch_buffer_size > 0:
      ds = ds.prefetch(self._prefetch_buffer_size)
    # TODO(jsimsa): Enable auto-tuning and optimizations when supported for
    # non-CPU devices.
    options = dataset_ops.Options()
    options.experimental_autotune = False
    options.experimental_optimization.apply_default_optimizations = False
    ds = ds.with_options(options)
    return ds

  def get_next(self, device=None):
    """Returns the next element given a `device`, else returns all in a list."""
    if device is not None:
      index = self._devices.index(device)
      return self._device_iterators[index].get_next()

    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next())
    return result

  def get_next_as_optional(self):
    # Like get_next(), but wraps each element in an Optional so exhaustion
    # can be detected without raising OutOfRangeError.
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(iterator_ops.get_next_as_optional(
            self._device_iterators[i]))
    return result

  @property
  def initializer(self):
    if context.executing_eagerly():
      return control_flow_ops.no_op()
    return self._initializer

  def _eager_reset(self):
    """Resets the MultiDeviceIterator in eager mode."""
    if not context.executing_eagerly():
      raise ValueError("Eager reset is only supported in eager mode.")
    # pylint: disable=protected-access
    self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
        self._dataset._variant_tensor,
        self._multi_device_iterator_resource,
        max_buffer_size=self._max_buffer_size)
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = self._create_device_dataset(i)
        # Reset the device iterator resources with the new dataset.
        ds_variant = ds._variant_tensor
        gen_dataset_ops.make_iterator(
            ds_variant, self._device_iterators[i]._iterator_resource)

  @property
  def _element_structure(self):
    return dataset_ops.get_structure(self._dataset)
from __future__ import division from io import BytesIO import math import numpy from PIL import Image from six import PY3 from ._image import window_batch_bchw from . import ExpectsAxisLabels, SourcewiseTransformer from .. import config class ImagesFromBytes(SourcewiseTransformer): """Load from a stream of bytes objects representing encoded images. Parameters ---------- data_stream : instance of :class:`AbstractDataStream` The wrapped data stream. The individual examples returned by this should be the bytes (in a `bytes` container on Python 3 or a `str` on Python 2) comprising an image in a format readable by PIL, such as PNG, JPEG, etc. color_mode : str, optional Mode to pass to PIL for color space conversion. Default is RGB. If `None`, no coercion is performed. Notes ----- Images are returned as NumPy arrays converted from PIL objects. If there is more than one color channel, then the array is transposed from the `(height, width, channel)` dimension layout native to PIL to the `(channel, height, width)` layout that is pervasive in the world of convolutional networks. If there is only one color channel, as for monochrome or binary images, a leading axis with length 1 is added for the sake of uniformity/predictability. This SourcewiseTransformer supports streams returning single examples as `bytes` objects (`str` on Python 2.x) as well as streams that return iterables containing such objects. In the case of an iterable, a list of loaded images is returned. """ def __init__(self, data_stream, color_mode='RGB', **kwargs): kwargs.setdefault('produces_examples', data_stream.produces_examples) # Acrobatics currently required to correctly set axis labels. 
which_sources = kwargs.get('which_sources', data_stream.sources) axis_labels = self._make_axis_labels(data_stream, which_sources, kwargs['produces_examples']) kwargs.setdefault('axis_labels', axis_labels) super(ImagesFromBytes, self).__init__(data_stream, **kwargs) self.color_mode = color_mode def transform_source_example(self, example, source_name): if PY3: bytes_type = bytes else: bytes_type = str if not isinstance(example, bytes_type): raise TypeError("expected {} object".format(bytes_type.__name__)) pil_image = Image.open(BytesIO(example)) if self.color_mode is not None: pil_image = pil_image.convert(self.color_mode) image = numpy.array(pil_image) if image.ndim == 3: # Transpose to `(channels, height, width)` layout. return image.transpose(2, 0, 1) elif image.ndim == 2: # Add a channels axis of length 1. image = image[numpy.newaxis] else: raise ValueError('unexpected number of axes') return image def transform_source_batch(self, batch, source_name): return [self.transform_source_example(im, source_name) for im in batch] def _make_axis_labels(self, data_stream, which_sources, produces_examples): # This is ugly and probably deserves a refactoring of how we handle # axis labels. It would be simpler to use memoized read-only # properties, but the AbstractDataStream constructor tries to set # self.axis_labels currently. We can't use self.which_sources or # self.produces_examples here, because this *computes* things that # need to be passed into the superclass constructor, necessarily # meaning that the superclass constructor hasn't been called. # Cooperative inheritance is hard, etc. 
labels = {} for source in data_stream.sources: if source in which_sources: if produces_examples: labels[source] = ('channel', 'height', 'width') else: labels[source] = ('batch', 'channel', 'height', 'width') else: labels[source] = (data_stream.axis_labels[source] if source in data_stream.axis_labels else None) return labels class MinimumImageDimensions(SourcewiseTransformer, ExpectsAxisLabels): """Resize (lists of) images to minimum dimensions. Parameters ---------- data_stream : instance of :class:`AbstractDataStream` The data stream to wrap. minimum_shape : 2-tuple The minimum `(height, width)` dimensions every image must have. Images whose height and width are larger than these dimensions are passed through as-is. resample : str, optional Resampling filter for PIL to use to upsample any images requiring it. Options include 'nearest' (default), 'bilinear', and 'bicubic'. See the PIL documentation for more detailed information. Notes ----- This transformer expects stream sources returning individual images, represented as 2- or 3-dimensional arrays, or lists of the same. The format of the stream is unaltered. 
""" def __init__(self, data_stream, minimum_shape, resample='nearest', **kwargs): self.minimum_shape = minimum_shape try: self.resample = getattr(Image, resample.upper()) except AttributeError: raise ValueError("unknown resampling filter '{}'".format(resample)) kwargs.setdefault('produces_examples', data_stream.produces_examples) kwargs.setdefault('axis_labels', data_stream.axis_labels) super(MinimumImageDimensions, self).__init__(data_stream, **kwargs) def transform_source_batch(self, batch, source_name): self.verify_axis_labels(('batch', 'channel', 'height', 'width'), self.data_stream.axis_labels[source_name], source_name) return [self._example_transform(im, source_name) for im in batch] def transform_source_example(self, example, source_name): self.verify_axis_labels(('channel', 'height', 'width'), self.data_stream.axis_labels[source_name], source_name) return self._example_transform(example, source_name) def _example_transform(self, example, _): if example.ndim > 3 or example.ndim < 2: raise NotImplementedError min_height, min_width = self.minimum_shape original_height, original_width = example.shape[-2:] if original_height < min_height or original_width < min_width: dt = example.dtype # If we're dealing with a colour image, swap around the axes # to be in the format that PIL needs. if example.ndim == 3: im = example.tranpose(2, 0, 1) else: im = example im = Image.fromarray(im) width, height = im.size multiplier = max(1, min_width / width, min_height / height) width = int(math.ceil(width * multiplier)) height = int(math.ceil(height * multiplier)) im = numpy.array(im.resize((width, height))).astype(dt) # If necessary, undo the axis swap from earlier. if im.ndim == 3: example = im.transpose(2, 0, 1) else: example = im return example class RandomFixedSizeCrop(SourcewiseTransformer, ExpectsAxisLabels): """Randomly crop images to a fixed window size. Parameters ---------- data_stream : :class:`AbstractDataStream` The data stream to wrap. 
window_shape : tuple The `(height, width)` tuple representing the size of the output window. Notes ----- This transformer expects to act on stream sources which provide one of * Single images represented as 3-dimensional ndarrays, with layout `(channel, height, width)`. * Batches of images represented as lists of 3-dimensional ndarrays, possibly of different shapes (i.e. images of differing heights/widths). * Batches of images represented as 4-dimensional ndarrays, with layout `(batch, channel, height, width)`. The format of the stream will be un-altered, i.e. if lists are yielded by `data_stream` then lists will be yielded by this transformer. """ def __init__(self, data_stream, window_shape, **kwargs): self.window_shape = window_shape self.rng = kwargs.pop('rng', None) self.warned_axis_labels = False if self.rng is None: self.rng = numpy.random.RandomState(config.default_seed) kwargs.setdefault('produces_examples', data_stream.produces_examples) kwargs.setdefault('axis_labels', data_stream.axis_labels) super(RandomFixedSizeCrop, self).__init__(data_stream, **kwargs) def transform_source_batch(self, source, source_name): self.verify_axis_labels(('batch', 'channel', 'height', 'width'), self.data_stream.axis_labels[source_name], source_name) windowed_height, windowed_width = self.window_shape if isinstance(source, list) and all(isinstance(b, numpy.ndarray) and b.ndim == 3 for b in source): return [self.transform_source_example(im, source_name) for im in source] elif isinstance(source, numpy.ndarray) and source.ndim == 4: # Hardcoded assumption of (batch, channels, height, width). # This is what the fast Cython code supports. 
out = numpy.empty(source.shape[:2] + self.window_shape, dtype=source.dtype) batch_size = source.shape[0] image_height, image_width = source.shape[2:] max_h_off = image_height - windowed_height max_w_off = image_width - windowed_width if max_h_off < 0 or max_w_off < 0: raise ValueError("Got ndarray batch with image dimensions {} " "but requested window shape of {}".format( source.shape[2:], self.window_shape)) offsets_w = self.rng.random_integers(0, max_w_off, size=batch_size) offsets_h = self.rng.random_integers(0, max_h_off, size=batch_size) window_batch_bchw(source, offsets_h, offsets_w, out) return out else: raise ValueError("uninterpretable batch format; expected a list " "of arrays with ndim = 3, or an array with " "ndim = 4") def transform_source_example(self, example, source_name): self.verify_axis_labels(('channel', 'height', 'width'), self.data_stream.axis_labels[source_name], source_name) windowed_height, windowed_width = self.window_shape if not isinstance(example, numpy.ndarray) or example.ndim != 3: raise ValueError("uninterpretable example format; expected " "ndarray with ndim = 3") image_height, image_width = example.shape[1:] if image_height < windowed_height or image_width < windowed_width: raise ValueError("can't obtain ({}, {}) window from image " "dimensions ({}, {})".format( windowed_height, windowed_width, image_height, image_width)) if image_height - windowed_height > 0: off_h = self.rng.random_integers(0, image_height - windowed_height) else: off_h = 0 if image_width - windowed_width > 0: off_w = self.rng.random_integers(0, image_width - windowed_width) else: off_w = 0 return example[:, off_h:off_h + windowed_height, off_w:off_w + windowed_width]
"""BleBox cover entities tests.""" import logging from unittest.mock import AsyncMock, PropertyMock import blebox_uniapi import pytest from homeassistant.components.cover import ( ATTR_CURRENT_POSITION, ATTR_POSITION, DEVICE_CLASS_DOOR, DEVICE_CLASS_GATE, DEVICE_CLASS_SHUTTER, STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, SUPPORT_CLOSE, SUPPORT_OPEN, SUPPORT_SET_POSITION, SUPPORT_STOP, ) from homeassistant.const import ( ATTR_DEVICE_CLASS, ATTR_SUPPORTED_FEATURES, SERVICE_CLOSE_COVER, SERVICE_OPEN_COVER, SERVICE_SET_COVER_POSITION, SERVICE_STOP_COVER, STATE_UNKNOWN, ) from .conftest import async_setup_entity, mock_feature ALL_COVER_FIXTURES = ["gatecontroller", "shutterbox", "gatebox"] FIXTURES_SUPPORTING_STOP = ["gatecontroller", "shutterbox"] @pytest.fixture(name="shutterbox") def shutterbox_fixture(): """Return a shutterBox fixture.""" feature = mock_feature( "covers", blebox_uniapi.cover.Cover, unique_id="BleBox-shutterBox-2bee34e750b8-position", full_name="shutterBox-position", device_class="shutter", current=None, state=None, has_stop=True, is_slider=True, ) product = feature.product type(product).name = PropertyMock(return_value="My shutter") type(product).model = PropertyMock(return_value="shutterBox") return (feature, "cover.shutterbox_position") @pytest.fixture(name="gatebox") def gatebox_fixture(): """Return a gateBox fixture.""" feature = mock_feature( "covers", blebox_uniapi.cover.Cover, unique_id="BleBox-gateBox-1afe34db9437-position", device_class="gatebox", full_name="gateBox-position", current=None, state=None, has_stop=False, is_slider=False, ) product = feature.product type(product).name = PropertyMock(return_value="My gatebox") type(product).model = PropertyMock(return_value="gateBox") return (feature, "cover.gatebox_position") @pytest.fixture(name="gatecontroller") def gate_fixture(): """Return a gateController fixture.""" feature = mock_feature( "covers", blebox_uniapi.cover.Cover, 
unique_id="BleBox-gateController-2bee34e750b8-position", full_name="gateController-position", device_class="gate", current=None, state=None, has_stop=True, is_slider=True, ) product = feature.product type(product).name = PropertyMock(return_value="My gate controller") type(product).model = PropertyMock(return_value="gateController") return (feature, "cover.gatecontroller_position") async def test_init_gatecontroller(gatecontroller, hass, config): """Test gateController default state.""" _, entity_id = gatecontroller entry = await async_setup_entity(hass, config, entity_id) assert entry.unique_id == "BleBox-gateController-2bee34e750b8-position" state = hass.states.get(entity_id) assert state.name == "gateController-position" assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_GATE supported_features = state.attributes[ATTR_SUPPORTED_FEATURES] assert supported_features & SUPPORT_OPEN assert supported_features & SUPPORT_CLOSE assert supported_features & SUPPORT_STOP assert supported_features & SUPPORT_SET_POSITION assert ATTR_CURRENT_POSITION not in state.attributes assert state.state == STATE_UNKNOWN device_registry = await hass.helpers.device_registry.async_get_registry() device = device_registry.async_get(entry.device_id) assert device.name == "My gate controller" assert device.identifiers == {("blebox", "abcd0123ef5678")} assert device.manufacturer == "BleBox" assert device.model == "gateController" assert device.sw_version == "1.23" async def test_init_shutterbox(shutterbox, hass, config): """Test gateBox default state.""" _, entity_id = shutterbox entry = await async_setup_entity(hass, config, entity_id) assert entry.unique_id == "BleBox-shutterBox-2bee34e750b8-position" state = hass.states.get(entity_id) assert state.name == "shutterBox-position" assert entry.device_class == DEVICE_CLASS_SHUTTER supported_features = state.attributes[ATTR_SUPPORTED_FEATURES] assert supported_features & SUPPORT_OPEN assert supported_features & SUPPORT_CLOSE assert 
supported_features & SUPPORT_STOP assert supported_features & SUPPORT_SET_POSITION assert ATTR_CURRENT_POSITION not in state.attributes assert state.state == STATE_UNKNOWN device_registry = await hass.helpers.device_registry.async_get_registry() device = device_registry.async_get(entry.device_id) assert device.name == "My shutter" assert device.identifiers == {("blebox", "abcd0123ef5678")} assert device.manufacturer == "BleBox" assert device.model == "shutterBox" assert device.sw_version == "1.23" async def test_init_gatebox(gatebox, hass, config): """Test cover default state.""" _, entity_id = gatebox entry = await async_setup_entity(hass, config, entity_id) assert entry.unique_id == "BleBox-gateBox-1afe34db9437-position" state = hass.states.get(entity_id) assert state.name == "gateBox-position" assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_DOOR supported_features = state.attributes[ATTR_SUPPORTED_FEATURES] assert supported_features & SUPPORT_OPEN assert supported_features & SUPPORT_CLOSE # Not available during init since requires fetching state to detect assert not supported_features & SUPPORT_STOP assert not supported_features & SUPPORT_SET_POSITION assert ATTR_CURRENT_POSITION not in state.attributes assert state.state == STATE_UNKNOWN device_registry = await hass.helpers.device_registry.async_get_registry() device = device_registry.async_get(entry.device_id) assert device.name == "My gatebox" assert device.identifiers == {("blebox", "abcd0123ef5678")} assert device.manufacturer == "BleBox" assert device.model == "gateBox" assert device.sw_version == "1.23" @pytest.mark.parametrize("feature", ALL_COVER_FIXTURES, indirect=["feature"]) async def test_open(feature, hass, config): """Test cover opening.""" feature_mock, entity_id = feature def initial_update(): feature_mock.state = 3 # manually stopped def open_gate(): feature_mock.state = 1 # opening feature_mock.async_update = AsyncMock(side_effect=initial_update) feature_mock.async_open = 
AsyncMock(side_effect=open_gate) await async_setup_entity(hass, config, entity_id) assert hass.states.get(entity_id).state == STATE_CLOSED feature_mock.async_update = AsyncMock() await hass.services.async_call( "cover", SERVICE_OPEN_COVER, {"entity_id": entity_id}, blocking=True, ) assert hass.states.get(entity_id).state == STATE_OPENING @pytest.mark.parametrize("feature", ALL_COVER_FIXTURES, indirect=["feature"]) async def test_close(feature, hass, config): """Test cover closing.""" feature_mock, entity_id = feature def initial_update(): feature_mock.state = 4 # open def close(): feature_mock.state = 0 # closing feature_mock.async_update = AsyncMock(side_effect=initial_update) feature_mock.async_close = AsyncMock(side_effect=close) await async_setup_entity(hass, config, entity_id) assert hass.states.get(entity_id).state == STATE_OPEN feature_mock.async_update = AsyncMock() await hass.services.async_call( "cover", SERVICE_CLOSE_COVER, {"entity_id": entity_id}, blocking=True ) assert hass.states.get(entity_id).state == STATE_CLOSING def opening_to_stop_feature_mock(feature_mock): """Return an mocked feature which can be updated and stopped.""" def initial_update(): feature_mock.state = 1 # opening def stop(): feature_mock.state = 2 # manually stopped feature_mock.async_update = AsyncMock(side_effect=initial_update) feature_mock.async_stop = AsyncMock(side_effect=stop) @pytest.mark.parametrize("feature", FIXTURES_SUPPORTING_STOP, indirect=["feature"]) async def test_stop(feature, hass, config): """Test cover stopping.""" feature_mock, entity_id = feature opening_to_stop_feature_mock(feature_mock) await async_setup_entity(hass, config, entity_id) assert hass.states.get(entity_id).state == STATE_OPENING feature_mock.async_update = AsyncMock() await hass.services.async_call( "cover", SERVICE_STOP_COVER, {"entity_id": entity_id}, blocking=True ) assert hass.states.get(entity_id).state == STATE_OPEN @pytest.mark.parametrize("feature", ALL_COVER_FIXTURES, 
indirect=["feature"]) async def test_update(feature, hass, config): """Test cover updating.""" feature_mock, entity_id = feature def initial_update(): feature_mock.current = 29 # inverted feature_mock.state = 2 # manually stopped feature_mock.async_update = AsyncMock(side_effect=initial_update) await async_setup_entity(hass, config, entity_id) state = hass.states.get(entity_id) assert state.attributes[ATTR_CURRENT_POSITION] == 71 # 100 - 29 assert state.state == STATE_OPEN @pytest.mark.parametrize( "feature", ["gatecontroller", "shutterbox"], indirect=["feature"] ) async def test_set_position(feature, hass, config): """Test cover position setting.""" feature_mock, entity_id = feature def initial_update(): feature_mock.state = 3 # closed def set_position(position): assert position == 99 # inverted feature_mock.state = 1 # opening # feature_mock.current = position feature_mock.async_update = AsyncMock(side_effect=initial_update) feature_mock.async_set_position = AsyncMock(side_effect=set_position) await async_setup_entity(hass, config, entity_id) assert hass.states.get(entity_id).state == STATE_CLOSED feature_mock.async_update = AsyncMock() await hass.services.async_call( "cover", SERVICE_SET_COVER_POSITION, {"entity_id": entity_id, ATTR_POSITION: 1}, blocking=True, ) # almost closed assert hass.states.get(entity_id).state == STATE_OPENING async def test_unknown_position(shutterbox, hass, config): """Test cover position setting.""" feature_mock, entity_id = shutterbox def initial_update(): feature_mock.state = 4 # opening feature_mock.current = -1 feature_mock.async_update = AsyncMock(side_effect=initial_update) await async_setup_entity(hass, config, entity_id) state = hass.states.get(entity_id) assert state.state == STATE_OPEN assert ATTR_CURRENT_POSITION not in state.attributes async def test_with_stop(gatebox, hass, config): """Test stop capability is available.""" feature_mock, entity_id = gatebox opening_to_stop_feature_mock(feature_mock) feature_mock.has_stop = 
True await async_setup_entity(hass, config, entity_id) state = hass.states.get(entity_id) supported_features = state.attributes[ATTR_SUPPORTED_FEATURES] assert supported_features & SUPPORT_STOP async def test_with_no_stop(gatebox, hass, config): """Test stop capability is not available.""" feature_mock, entity_id = gatebox opening_to_stop_feature_mock(feature_mock) feature_mock.has_stop = False await async_setup_entity(hass, config, entity_id) state = hass.states.get(entity_id) supported_features = state.attributes[ATTR_SUPPORTED_FEATURES] assert not supported_features & SUPPORT_STOP @pytest.mark.parametrize("feature", ALL_COVER_FIXTURES, indirect=["feature"]) async def test_update_failure(feature, hass, config, caplog): """Test that update failures are logged.""" caplog.set_level(logging.ERROR) feature_mock, entity_id = feature feature_mock.async_update = AsyncMock(side_effect=blebox_uniapi.error.ClientError) await async_setup_entity(hass, config, entity_id) assert f"Updating '{feature_mock.full_name}' failed: " in caplog.text @pytest.mark.parametrize("feature", ALL_COVER_FIXTURES, indirect=["feature"]) async def test_opening_state(feature, hass, config): """Test that entity properties work.""" feature_mock, entity_id = feature def initial_update(): feature_mock.state = 1 # opening feature_mock.async_update = AsyncMock(side_effect=initial_update) await async_setup_entity(hass, config, entity_id) assert hass.states.get(entity_id).state == STATE_OPENING @pytest.mark.parametrize("feature", ALL_COVER_FIXTURES, indirect=["feature"]) async def test_closing_state(feature, hass, config): """Test that entity properties work.""" feature_mock, entity_id = feature def initial_update(): feature_mock.state = 0 # closing feature_mock.async_update = AsyncMock(side_effect=initial_update) await async_setup_entity(hass, config, entity_id) assert hass.states.get(entity_id).state == STATE_CLOSING @pytest.mark.parametrize("feature", ALL_COVER_FIXTURES, indirect=["feature"]) async def 
test_closed_state(feature, hass, config): """Test that entity properties work.""" feature_mock, entity_id = feature def initial_update(): feature_mock.state = 3 # closed feature_mock.async_update = AsyncMock(side_effect=initial_update) await async_setup_entity(hass, config, entity_id) assert hass.states.get(entity_id).state == STATE_CLOSED
# Copyright (c) 2010 Spotify AB # Copyright (c) 2010-2011 Yelp # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
""" Represents a connection to the EMR service """ import types import boto import boto.utils from boto.ec2.regioninfo import RegionInfo from boto.emr.emrobject import JobFlow, RunJobFlowResponse from boto.emr.emrobject import AddInstanceGroupsResponse, ModifyInstanceGroupsResponse from boto.emr.step import JarStep from boto.connection import AWSQueryConnection from boto.exception import EmrResponseError class EmrConnection(AWSQueryConnection): APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31') DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1') DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint', 'elasticmapreduce.amazonaws.com') ResponseError = EmrResponseError # Constants for AWS Console debugging DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar' DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch' def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/'): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path) def _required_auth_capability(self): return ['emr'] def describe_jobflow(self, jobflow_id): """ Describes a single Elastic MapReduce job flow :type jobflow_id: str :param jobflow_id: The job flow id of interest """ jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id]) if jobflows: return jobflows[0] def describe_jobflows(self, states=None, jobflow_ids=None, created_after=None, created_before=None): """ Retrieve all the Elastic MapReduce job flows on your account :type states: list :param states: A list of strings with 
job flow states wanted :type jobflow_ids: list :param jobflow_ids: A list of job flow IDs :type created_after: datetime :param created_after: Bound on job flow creation time :type created_before: datetime :param created_before: Bound on job flow creation time """ params = {} if states: self.build_list_params(params, states, 'JobFlowStates.member') if jobflow_ids: self.build_list_params(params, jobflow_ids, 'JobFlowIds.member') if created_after: params['CreatedAfter'] = created_after.strftime( boto.utils.ISO8601) if created_before: params['CreatedBefore'] = created_before.strftime( boto.utils.ISO8601) return self.get_list('DescribeJobFlows', params, [('member', JobFlow)]) def terminate_jobflow(self, jobflow_id): """ Terminate an Elastic MapReduce job flow :type jobflow_id: str :param jobflow_id: A jobflow id """ self.terminate_jobflows([jobflow_id]) def terminate_jobflows(self, jobflow_ids): """ Terminate an Elastic MapReduce job flow :type jobflow_ids: list :param jobflow_ids: A list of job flow IDs """ params = {} self.build_list_params(params, jobflow_ids, 'JobFlowIds.member') return self.get_status('TerminateJobFlows', params, verb='POST') def add_jobflow_steps(self, jobflow_id, steps): """ Adds steps to a jobflow :type jobflow_id: str :param jobflow_id: The job flow id :type steps: list(boto.emr.Step) :param steps: A list of steps to add to the job """ if type(steps) != types.ListType: steps = [steps] params = {} params['JobFlowId'] = jobflow_id # Step args step_args = [self._build_step_args(step) for step in steps] params.update(self._build_step_list(step_args)) return self.get_object( 'AddJobFlowSteps', params, RunJobFlowResponse, verb='POST') def add_instance_groups(self, jobflow_id, instance_groups): """ Adds instance groups to a running cluster. 
:type jobflow_id: str :param jobflow_id: The id of the jobflow which will take the new instance groups :type instance_groups: list(boto.emr.InstanceGroup) :param instance_groups: A list of instance groups to add to the job """ if type(instance_groups) != types.ListType: instance_groups = [instance_groups] params = {} params['JobFlowId'] = jobflow_id params.update(self._build_instance_group_list_args(instance_groups)) return self.get_object('AddInstanceGroups', params, AddInstanceGroupsResponse, verb='POST') def modify_instance_groups(self, instance_group_ids, new_sizes): """ Modify the number of nodes and configuration settings in an instance group. :type instance_group_ids: list(str) :param instance_group_ids: A list of the ID's of the instance groups to be modified :type new_sizes: list(int) :param new_sizes: A list of the new sizes for each instance group """ if type(instance_group_ids) != types.ListType: instance_group_ids = [instance_group_ids] if type(new_sizes) != types.ListType: new_sizes = [new_sizes] instance_groups = zip(instance_group_ids, new_sizes) params = {} for k, ig in enumerate(instance_groups): # could be wrong - the example amazon gives uses # InstanceRequestCount, while the api documentation # says InstanceCount params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0] params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1] return self.get_object('ModifyInstanceGroups', params, ModifyInstanceGroupsResponse, verb='POST') def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None, main_instance_type='m1.small', subordinate_instance_type='m1.small', num_instances=1, action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False, enable_debugging=False, hadoop_version=None, steps=[], bootstrap_actions=[], instance_groups=None, additional_info=None, ami_version=None, api_params=None): """ Runs a job flow :type name: str :param name: Name of the job flow :type log_uri: str :param log_uri: URI of the S3 bucket to 
place logs :type ec2_keyname: str :param ec2_keyname: EC2 key used for the instances :type availability_zone: str :param availability_zone: EC2 availability zone of the cluster :type main_instance_type: str :param main_instance_type: EC2 instance type of the main :type subordinate_instance_type: str :param subordinate_instance_type: EC2 instance type of the subordinate nodes :type num_instances: int :param num_instances: Number of instances in the Hadoop cluster :type action_on_failure: str :param action_on_failure: Action to take if a step terminates :type keep_alive: bool :param keep_alive: Denotes whether the cluster should stay alive upon completion :type enable_debugging: bool :param enable_debugging: Denotes whether AWS console debugging should be enabled. :type hadoop_version: str :param hadoop_version: Version of Hadoop to use. This no longer defaults to '0.20' and now uses the AMI default. :type steps: list(boto.emr.Step) :param steps: List of steps to add with the job :type bootstrap_actions: list(boto.emr.BootstrapAction) :param bootstrap_actions: List of bootstrap actions that run before Hadoop starts. :type instance_groups: list(boto.emr.InstanceGroup) :param instance_groups: Optional list of instance groups to use when creating this job. NB: When provided, this argument supersedes num_instances and main/subordinate_instance_type. :type ami_version: str :param ami_version: Amazon Machine Image (AMI) version to use for instances. Values accepted by EMR are '1.0', '2.0', and 'latest'; EMR currently defaults to '1.0' if you don't set 'ami_version'. :type additional_info: JSON str :param additional_info: A JSON string for selecting additional features :type api_params: dict :param api_params: a dictionary of additional parameters to pass directly to the EMR API (so you don't have to upgrade boto to use new EMR features). You can also delete an API parameter by setting it to None. 
:rtype: str :return: The jobflow id """ params = {} if action_on_failure: params['ActionOnFailure'] = action_on_failure params['Name'] = name params['LogUri'] = log_uri # Common instance args common_params = self._build_instance_common_args(ec2_keyname, availability_zone, keep_alive, hadoop_version) params.update(common_params) # NB: according to the AWS API's error message, we must # "configure instances either using instance count, main and # subordinate instance type or instance groups but not both." # # Thus we switch here on the truthiness of instance_groups. if not instance_groups: # Instance args (the common case) instance_params = self._build_instance_count_and_type_args( main_instance_type, subordinate_instance_type, num_instances) params.update(instance_params) else: # Instance group args (for spot instances or a heterogenous cluster) list_args = self._build_instance_group_list_args(instance_groups) instance_params = dict( ('Instances.%s' % k, v) for k, v in list_args.iteritems() ) params.update(instance_params) # Debugging step from EMR API docs if enable_debugging: debugging_step = JarStep(name='Setup Hadoop Debugging', action_on_failure='TERMINATE_JOB_FLOW', main_class=None, jar=self.DebuggingJar, step_args=self.DebuggingArgs) steps.insert(0, debugging_step) # Step args if steps: step_args = [self._build_step_args(step) for step in steps] params.update(self._build_step_list(step_args)) if bootstrap_actions: bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions] params.update(self._build_bootstrap_action_list(bootstrap_action_args)) if ami_version: params['AmiVersion'] = ami_version if additional_info is not None: params['AdditionalInfo'] = additional_info if api_params: for key, value in api_params.iteritems(): if value is None: params.pop(key, None) else: params[key] = value response = self.get_object( 'RunJobFlow', params, RunJobFlowResponse, verb='POST') return response.jobflowid def 
set_termination_protection(self, jobflow_id, termination_protection_status): """ Set termination protection on specified Elastic MapReduce job flows :type jobflow_ids: list or str :param jobflow_ids: A list of job flow IDs :type termination_protection_status: bool :param termination_protection_status: Termination protection status """ assert termination_protection_status in (True, False) params = {} params['TerminationProtected'] = (termination_protection_status and "true") or "false" self.build_list_params(params, [jobflow_id], 'JobFlowIds.member') return self.get_status('SetTerminationProtection', params, verb='POST') def _build_bootstrap_action_args(self, bootstrap_action): bootstrap_action_params = {} bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path try: bootstrap_action_params['Name'] = bootstrap_action.name except AttributeError: pass args = bootstrap_action.args() if args: self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member') return bootstrap_action_params def _build_step_args(self, step): step_params = {} step_params['ActionOnFailure'] = step.action_on_failure step_params['HadoopJarStep.Jar'] = step.jar() main_class = step.main_class() if main_class: step_params['HadoopJarStep.MainClass'] = main_class args = step.args() if args: self.build_list_params(step_params, args, 'HadoopJarStep.Args.member') step_params['Name'] = step.name return step_params def _build_bootstrap_action_list(self, bootstrap_actions): if type(bootstrap_actions) != types.ListType: bootstrap_actions = [bootstrap_actions] params = {} for i, bootstrap_action in enumerate(bootstrap_actions): for key, value in bootstrap_action.iteritems(): params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value return params def _build_step_list(self, steps): if type(steps) != types.ListType: steps = [steps] params = {} for i, step in enumerate(steps): for key, value in step.iteritems(): params['Steps.member.%s.%s' % (i+1, key)] = value 
return params def _build_instance_common_args(self, ec2_keyname, availability_zone, keep_alive, hadoop_version): """ Takes a number of parameters used when starting a jobflow (as specified in run_jobflow() above). Returns a comparable dict for use in making a RunJobFlow request. """ params = { 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(), } if hadoop_version: params['Instances.HadoopVersion'] = hadoop_version if ec2_keyname: params['Instances.Ec2KeyName'] = ec2_keyname if availability_zone: params['Instances.Placement.AvailabilityZone'] = availability_zone return params def _build_instance_count_and_type_args(self, main_instance_type, subordinate_instance_type, num_instances): """ Takes a main instance type (string), a subordinate instance type (string), and a number of instances. Returns a comparable dict for use in making a RunJobFlow request. """ params = { 'Instances.MainInstanceType' : main_instance_type, 'Instances.SubordinateInstanceType' : subordinate_instance_type, 'Instances.InstanceCount' : num_instances, } return params def _build_instance_group_args(self, instance_group): """ Takes an InstanceGroup; returns a dict that, when its keys are properly prefixed, can be used for describing InstanceGroups in RunJobFlow or AddInstanceGroups requests. """ params = { 'InstanceCount' : instance_group.num_instances, 'InstanceRole' : instance_group.role, 'InstanceType' : instance_group.type, 'Name' : instance_group.name, 'Market' : instance_group.market } if instance_group.market == 'SPOT': params['BidPrice'] = instance_group.bidprice return params def _build_instance_group_list_args(self, instance_groups): """ Takes a list of InstanceGroups, or a single InstanceGroup. Returns a comparable dict for use in making a RunJobFlow or AddInstanceGroups request. 
""" if type(instance_groups) != types.ListType: instance_groups = [instance_groups] params = {} for i, instance_group in enumerate(instance_groups): ig_dict = self._build_instance_group_args(instance_group) for key, value in ig_dict.iteritems(): params['InstanceGroups.member.%d.%s' % (i+1, key)] = value return params
from datetime import date, datetime import os import pytz import re import tempfile import traceback from xml.dom import Node from django.conf import settings from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.core.exceptions import PermissionDenied from django.core.files.storage import get_storage_class from django.core.mail import mail_admins from django.core.servers.basehttp import FileWrapper from django.db import IntegrityError, transaction from django.db.models.signals import pre_delete from django.http import HttpResponse, HttpResponseNotFound, \ StreamingHttpResponse from django.shortcuts import get_object_or_404 from django.utils.translation import ugettext as _ from django.utils import timezone from modilabs.utils.subprocess_timeout import ProcessTimedOut from pyxform.errors import PyXFormError from pyxform.xform2json import create_survey_element_from_xml import sys from onadata.apps.logger.models import Attachment from onadata.apps.logger.models import Instance from onadata.apps.logger.models.instance import InstanceHistory from onadata.apps.logger.models.instance import get_id_string_from_xml_str from onadata.apps.logger.models import XForm from onadata.apps.logger.models.xform import XLSFormError from onadata.apps.logger.xform_instance_parser import\ InstanceInvalidUserError, IsNotCrowdformError, DuplicateInstance,\ clean_and_parse_xml, get_uuid_from_xml, get_deprecated_uuid_from_xml,\ get_submission_date_from_xml from onadata.apps.viewer.models.data_dictionary import DataDictionary from onadata.apps.viewer.models.parsed_instance import _remove_from_mongo,\ xform_instances, ParsedInstance from onadata.libs.utils import common_tags from onadata.libs.utils.model_tools import queryset_iterator, set_uuid OPEN_ROSA_VERSION_HEADER = 'X-OpenRosa-Version' HTTP_OPEN_ROSA_VERSION_HEADER = 'HTTP_X_OPENROSA_VERSION' OPEN_ROSA_VERSION = '1.0' DEFAULT_CONTENT_TYPE = 'text/xml; charset=utf-8' 
DEFAULT_CONTENT_LENGTH = settings.DEFAULT_CONTENT_LENGTH uuid_regex = re.compile(r'<formhub><uuid>([^<]+)</uuid></formhub>', re.DOTALL) mongo_instances = settings.MONGO_DB.instances @transaction.commit_manually def create_instance(username, xml_file, media_files, status=u'submitted_via_web', uuid=None, date_created_override=None, request=None): """ I used to check if this file had been submitted already, I've taken this out because it was too slow. Now we're going to create a way for an admin to mark duplicate instances. This should simplify things a bit. Submission cases: If there is a username and no uuid, submitting an old ODK form. If there is no username and a uuid, submitting a touchform. If there is a username and a uuid, submitting a new ODK form. """ try: if username: username = username.lower() xml = xml_file.read() is_touchform = False # check alternative form submission ids if not uuid: # parse UUID from uploaded XML split_xml = uuid_regex.split(xml) # check that xml has UUID, then it is a crowdform if len(split_xml) > 1: uuid = split_xml[1] else: # is a touchform is_touchform = True if not username and not uuid: raise InstanceInvalidUserError() if uuid: # try find the form by its uuid which is the ideal condition if XForm.objects.filter(uuid=uuid).count() > 0: xform = XForm.objects.get(uuid=uuid) xform_username = xform.user.username if xform_username != username and not xform.is_crowd_form \ and not is_touchform: raise IsNotCrowdformError() username = xform_username # Else, since we have a username, the Instance creation logic will # handle checking for the forms existence by its id_string if username and request and request.user.is_authenticated(): id_string = get_id_string_from_xml_str(xml) xform = XForm.objects.get( id_string=id_string, user__username=username) if not xform.is_crowd_form and not is_touchform \ and xform.user.profile.require_auth \ and xform.user != request.user: raise PermissionDenied( _(u"%(request_user)s is not allowed to make 
submissions " u"to %(form_user)s's %(form_title)s form." % { 'request_user': request.user, 'form_user': xform.user, 'form_title': xform.title})) user = get_object_or_404(User, username=username) existing_instance_count = Instance.objects.filter( xml=xml, user=user).count() if existing_instance_count == 0: proceed_to_create_instance = True else: existing_instance = Instance.objects.filter(xml=xml, user=user)[0] if existing_instance.xform and\ not existing_instance.xform.has_start_time: proceed_to_create_instance = True else: # Ignore submission as a duplicate IFF # * a submission's XForm collects start time # * the submitted XML is an exact match with one that # has already been submitted for that user. proceed_to_create_instance = False raise DuplicateInstance() # get new and depracated uuid's new_uuid = get_uuid_from_xml(xml) duplicate_instances = Instance.objects.filter(uuid=new_uuid) if duplicate_instances: for f in media_files: Attachment.objects.get_or_create( instance=duplicate_instances[0], media_file=f, mimetype=f.content_type) # ensure we have saved the extra attachments transaction.commit() raise DuplicateInstance() if proceed_to_create_instance: # check if its an edit submission old_uuid = get_deprecated_uuid_from_xml(xml) instances = Instance.objects.filter(uuid=old_uuid) if not date_created_override: date_created_override = get_submission_date_from_xml(xml) if instances: instance = instances[0] InstanceHistory.objects.create( xml=instance.xml, xform_instance=instance, uuid=old_uuid) instance.xml = xml instance.uuid = new_uuid instance.save() else: # new submission instance = Instance.objects.create( xml=xml, user=user, status=status) for f in media_files: Attachment.objects.get_or_create( instance=instance, media_file=f, mimetype=f.content_type) # override date created if required if date_created_override: if not timezone.is_aware(date_created_override): # default to utc? 
date_created_override = timezone.make_aware( date_created_override, timezone.utc) instance.date_created = date_created_override instance.save() if instance.xform is not None: pi, created = ParsedInstance.objects.get_or_create( instance=instance) if not created: pi.save(async=False) # commit all changes transaction.commit() return instance except Exception: transaction.rollback() raise return None def report_exception(subject, info, exc_info=None): if exc_info: cls, err = exc_info[:2] message = _(u"Exception in request:" u" %(class)s: %(error)s")\ % {'class': cls.__name__, 'error': err} message += u"".join(traceback.format_exception(*exc_info)) else: message = u"%s" % info if settings.DEBUG or settings.TESTING_MODE: sys.stdout.write("Subject: %s\n" % subject) sys.stdout.write("Message: %s\n" % message) else: mail_admins(subject=subject, message=message) def response_with_mimetype_and_name( mimetype, name, extension=None, show_date=True, file_path=None, use_local_filesystem=False, full_mime=False): if extension is None: extension = mimetype if not full_mime: mimetype = "application/%s" % mimetype if file_path: try: if not use_local_filesystem: default_storage = get_storage_class()() wrapper = FileWrapper(default_storage.open(file_path)) response = StreamingHttpResponse(wrapper, mimetype=mimetype) response['Content-Length'] = default_storage.size(file_path) else: wrapper = FileWrapper(open(file_path)) response = StreamingHttpResponse(wrapper, mimetype=mimetype) response['Content-Length'] = os.path.getsize(file_path) except IOError: response = HttpResponseNotFound( _(u"The requested file could not be found.")) else: response = HttpResponse(mimetype=mimetype) response['Content-Disposition'] = disposition_ext_and_date( name, extension, show_date) return response def disposition_ext_and_date(name, extension, show_date=True): if name is None: return 'attachment;' if show_date: name = "%s_%s" % (name, date.today().strftime("%Y_%m_%d")) return 'attachment; filename=%s.%s' % 
(name, extension) def store_temp_file(data): tmp = tempfile.TemporaryFile() ret = None try: tmp.write(data) tmp.seek(0) ret = tmp finally: tmp.close() return ret def publish_form(callback): try: return callback() except (PyXFormError, XLSFormError) as e: return { 'type': 'alert-error', 'text': e } except IntegrityError as e: transaction.rollback() return { 'type': 'alert-error', 'text': _(u'Form with this id or SMS-keyword already exists.'), } except ValidationError as e: # on clone invalid URL return { 'type': 'alert-error', 'text': _(u'Invalid URL format.'), } except AttributeError as e: # form.publish returned None, not sure why... return { 'type': 'alert-error', 'text': e } except ProcessTimedOut as e: # catch timeout errors return { 'type': 'alert-error', 'text': _(u'Form validation timeout, please try again.'), } except Exception, e: # error in the XLS file; show an error to the user return { 'type': 'alert-error', 'text': e } def publish_xls_form(xls_file, user, id_string=None): """ Creates or updates a DataDictionary with supplied xls_file, user and optional id_string - if updating """ # get or create DataDictionary based on user and id string if id_string: dd = DataDictionary.objects.get( user=user, id_string=id_string) dd.xls = xls_file dd.save() return dd else: return DataDictionary.objects.create( user=user, xls=xls_file ) def publish_xml_form(xml_file, user, id_string=None): xml = xml_file.read() survey = create_survey_element_from_xml(xml) form_json = survey.to_json() if id_string: dd = DataDictionary.objects.get(user=user, id_string=id_string) dd.xml = xml dd.json = form_json dd._mark_start_time_boolean() set_uuid(dd) dd._set_uuid_in_xml() dd.save() return dd else: dd = DataDictionary(user=user, xml=xml, json=form_json) dd._mark_start_time_boolean() set_uuid(dd) dd._set_uuid_in_xml(file_name=xml_file.name) dd.save() return dd class BaseOpenRosaResponse(HttpResponse): status_code = 201 def __init__(self, *args, **kwargs): super(BaseOpenRosaResponse, 
self).__init__(*args, **kwargs) self[OPEN_ROSA_VERSION_HEADER] = OPEN_ROSA_VERSION tz = pytz.timezone(settings.TIME_ZONE) dt = datetime.now(tz).strftime('%a, %d %b %Y %H:%M:%S %Z') self['Date'] = dt self['X-OpenRosa-Accept-Content-Length'] = DEFAULT_CONTENT_LENGTH self['Content-Type'] = DEFAULT_CONTENT_TYPE class OpenRosaResponse(BaseOpenRosaResponse): status_code = 201 def __init__(self, *args, **kwargs): super(OpenRosaResponse, self).__init__(*args, **kwargs) # wrap content around xml self.content = '''<?xml version='1.0' encoding='UTF-8' ?> <OpenRosaResponse xmlns="http://openrosa.org/http/response"> <message nature="">%s</message> </OpenRosaResponse>''' % self.content class OpenRosaResponseNotFound(OpenRosaResponse): status_code = 404 class OpenRosaResponseBadRequest(OpenRosaResponse): status_code = 400 class OpenRosaResponseNotAllowed(OpenRosaResponse): status_code = 405 def inject_instanceid(xml_str, uuid): if get_uuid_from_xml(xml_str) is None: xml = clean_and_parse_xml(xml_str) children = xml.childNodes if children.length == 0: raise ValueError(_("XML string must have a survey element.")) # check if we have a meta tag survey_node = children.item(0) meta_tags = [ n for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName.lower() == "meta"] if len(meta_tags) == 0: meta_tag = xml.createElement("meta") xml.documentElement.appendChild(meta_tag) else: meta_tag = meta_tags[0] # check if we have an instanceID tag uuid_tags = [ n for n in meta_tag.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName == "instanceID"] if len(uuid_tags) == 0: uuid_tag = xml.createElement("instanceID") meta_tag.appendChild(uuid_tag) else: uuid_tag = uuid_tags[0] # insert meta and instanceID text_node = xml.createTextNode(u"uuid:%s" % uuid) uuid_tag.appendChild(text_node) return xml.toxml() return xml_str def update_mongo_for_xform(xform, only_update_missing=True): instance_ids = set( [i.id for i in Instance.objects.only('id').filter(xform=xform)]) 
sys.stdout.write("Total no of instances: %d\n" % len(instance_ids)) mongo_ids = set() user = xform.user userform_id = "%s_%s" % (user.username, xform.id_string) if only_update_missing: sys.stdout.write("Only updating missing mongo instances\n") mongo_ids = set( [rec[common_tags.ID] for rec in mongo_instances.find( {common_tags.USERFORM_ID: userform_id}, {common_tags.ID: 1})]) sys.stdout.write("Total no of mongo instances: %d\n" % len(mongo_ids)) # get the difference instance_ids = instance_ids.difference(mongo_ids) else: # clear mongo records mongo_instances.remove({common_tags.USERFORM_ID: userform_id}) # get instances sys.stdout.write( "Total no of instances to update: %d\n" % len(instance_ids)) instances = Instance.objects.only('id').in_bulk( [id for id in instance_ids]) total = len(instances) done = 0 for id, instance in instances.items(): (pi, created) = ParsedInstance.objects.get_or_create(instance=instance) pi.save(async=False) done += 1 # if 1000 records are done, flush mongo if (done % 1000) == 0: sys.stdout.write( 'Updated %d records, flushing MongoDB...\n' % done) settings.MONGO_CONNECTION.admin.command({'fsync': 1}) progress = "\r%.2f %% done..." % ((float(done) / float(total)) * 100) sys.stdout.write(progress) sys.stdout.flush() # flush mongo again when done settings.MONGO_CONNECTION.admin.command({'fsync': 1}) sys.stdout.write( "\nUpdated %s\n------------------------------------------\n" % xform.id_string) def mongo_sync_status(remongo=False, update_all=False, user=None, xform=None): """Check the status of records in the mysql db versus mongodb. At a minimum, return a report (string) of the results. 
Optionally, take action to correct the differences, based on these parameters, if present and defined: remongo -> if True, update the records missing in mongodb (default: False) update_all -> if True, update all the relevant records (default: False) user -> if specified, apply only to the forms for the given user (default: None) xform -> if specified, apply only to the given form (default: None) """ qs = XForm.objects.only('id_string', 'user').select_related('user') if user and not xform: qs = qs.filter(user=user) elif user and xform: qs = qs.filter(user=user, id_string=xform.id_string) else: qs = qs.all() total = qs.count() found = 0 done = 0 total_to_remongo = 0 report_string = "" for xform in queryset_iterator(qs, 100): # get the count user = xform.user instance_count = Instance.objects.filter(xform=xform).count() userform_id = "%s_%s" % (user.username, xform.id_string) mongo_count = mongo_instances.find( {common_tags.USERFORM_ID: userform_id}).count() if instance_count != mongo_count or update_all: line = "user: %s, id_string: %s\nInstance count: %d\t"\ "Mongo count: %d\n---------------------------------"\ "-----\n" % ( user.username, xform.id_string, instance_count, mongo_count) report_string += line found += 1 total_to_remongo += (instance_count - mongo_count) # should we remongo if remongo or (remongo and update_all): if update_all: sys.stdout.write( "Updating all records for %s\n--------------------" "---------------------------\n" % xform.id_string) else: sys.stdout.write( "Updating missing records for %s\n----------------" "-------------------------------\n" % xform.id_string) update_mongo_for_xform( xform, only_update_missing=not update_all) done += 1 sys.stdout.write( "%.2f %% done ...\r" % ((float(done) / float(total)) * 100)) # only show stats if we are not updating mongo, the update function # will show progress if not remongo: line = "Total # of forms out of sync: %d\n" \ "Total # of records to remongo: %d\n" % (found, total_to_remongo) 
report_string += line return report_string def remove_xform(xform): # disconnect parsed instance pre delete signal pre_delete.disconnect(_remove_from_mongo, sender=ParsedInstance) # delete instances from mongo db query = { ParsedInstance.USERFORM_ID: "%s_%s" % (xform.user.username, xform.id_string)} xform_instances.remove(query, j=True) # delete xform, and all related models xform.delete() # reconnect parsed instance pre delete signal? pre_delete.connect(_remove_from_mongo, sender=ParsedInstance)
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import pandas as pd import sys from airflow import settings from airflow.configuration import conf from airflow.jobs import SchedulerJob from airflow.models import DagBag, DagModel, DagRun, TaskInstance from airflow.utils import timezone from airflow.utils.state import State SUBDIR = 'scripts/perf/dags' DAG_IDS = ['perf_dag_1', 'perf_dag_2'] MAX_RUNTIME_SECS = 6 class SchedulerMetricsJob(SchedulerJob): """ This class extends SchedulerJob to instrument the execution performance of task instances contained in each DAG. We want to know if any DAG is starved of resources, and this will be reflected in the stats printed out at the end of the test run. The following metrics will be instrumented for each task instance (dag_id, task_id, execution_date) tuple: 1. Queuing delay - time taken from starting the executor to the task instance to be added to the executor queue. 2. Start delay - time taken from starting the executor to the task instance to start execution. 3. Land time - time taken from starting the executor to task instance completion. 4. Duration - time taken for executing the task instance. The DAGs implement bash operators that call the system wait command. 
This is representative of typical operators run on Airflow - queries that are run on remote systems and spend the majority of their time on I/O wait. To Run: $ python scripts/perf/scheduler_ops_metrics.py [timeout] You can specify timeout in seconds as an optional parameter. Its default value is 6 seconds. """ __mapper_args__ = { 'polymorphic_identity': 'SchedulerMetricsJob' } def print_stats(self): """ Print operational metrics for the scheduler test. """ session = settings.Session() TI = TaskInstance tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .all() ) successful_tis = [x for x in tis if x.state == State.SUCCESS] ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date, (ti.queued_dttm - self.start_date).total_seconds(), (ti.start_date - self.start_date).total_seconds(), (ti.end_date - self.start_date).total_seconds(), ti.duration) for ti in successful_tis] ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id', 'execution_date', 'queue_delay', 'start_delay', 'land_time', 'duration']) print('Performance Results') print('###################') for dag_id in DAG_IDS: print('DAG {}'.format(dag_id)) print(ti_perf_df[ti_perf_df['dag_id'] == dag_id]) print('###################') if len(tis) > len(successful_tis): print("WARNING!! 
The following task instances haven't completed") print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state) for ti in filter(lambda x: x.state != State.SUCCESS, tis)], columns=['dag_id', 'task_id', 'execution_date', 'state'])) session.commit() def heartbeat(self): """ Override the scheduler heartbeat to determine when the test is complete """ super(SchedulerMetricsJob, self).heartbeat() session = settings.Session() # Get all the relevant task instances TI = TaskInstance successful_tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .filter(TI.state.in_([State.SUCCESS])) .all() ) session.commit() dagbag = DagBag(SUBDIR) dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS] # the tasks in perf_dag_1 and per_dag_2 have a daily schedule interval. num_task_instances = sum([(timezone.utcnow() - task.start_date).days for dag in dags for task in dag.tasks]) if (len(successful_tis) == num_task_instances or (timezone.utcnow() - self.start_date).total_seconds() > MAX_RUNTIME_SECS): if len(successful_tis) == num_task_instances: self.log.info("All tasks processed! Printing stats.") else: self.log.info("Test timeout reached. Printing available stats.") self.print_stats() set_dags_paused_state(True) sys.exit() def clear_dag_runs(): """ Remove any existing DAG runs for the perf test DAGs. """ session = settings.Session() drs = session.query(DagRun).filter( DagRun.dag_id.in_(DAG_IDS), ).all() for dr in drs: logging.info('Deleting DagRun :: {}'.format(dr)) session.delete(dr) def clear_dag_task_instances(): """ Remove any existing task instances for the perf test DAGs. """ session = settings.Session() TI = TaskInstance tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .all() ) for ti in tis: logging.info('Deleting TaskInstance :: {}'.format(ti)) session.delete(ti) session.commit() def set_dags_paused_state(is_paused): """ Toggle the pause state of the DAGs in the test. 
""" session = settings.Session() dms = session.query(DagModel).filter( DagModel.dag_id.in_(DAG_IDS)) for dm in dms: logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused)) dm.is_paused = is_paused session.commit() def main(): global MAX_RUNTIME_SECS if len(sys.argv) > 1: try: max_runtime_secs = int(sys.argv[1]) if max_runtime_secs < 1: raise ValueError MAX_RUNTIME_SECS = max_runtime_secs except ValueError: logging.error('Specify a positive integer for timeout.') sys.exit(1) conf.load_test_config() set_dags_paused_state(False) clear_dag_runs() clear_dag_task_instances() job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR) job.run() if __name__ == "__main__": main()
import numpy as np
import pytest

from scipy import sparse

from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal

from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model._ransac import _dynamic_max_trials
from sklearn.exceptions import ConvergenceWarning


# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])

# Add some faulty data
rng = np.random.RandomState(1000)
outliers = np.unique(rng.randint(len(X), size=200))
data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10

X = data[:, 0][:, np.newaxis]
y = data[:, 1]


# RANSAC should flag exactly the injected outliers as non-inliers.
def test_ransac_inliers_outliers():
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       random_state=0)

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, y)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


# An is_data_valid callback that always rejects must make fit() fail.
def test_ransac_is_data_valid():
    def is_data_valid(X, y):
        assert X.shape[0] == 2
        assert y.shape[0] == 2
        return False

    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    y = rng.rand(10, 1)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_data_valid=is_data_valid,
                                       random_state=0)
    with pytest.raises(ValueError):
        ransac_estimator.fit(X, y)


# Same, but for the is_model_valid callback.
def test_ransac_is_model_valid():
    def is_model_valid(estimator, X, y):
        assert X.shape[0] == 2
        assert y.shape[0] == 2
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_model_valid=is_model_valid,
                                       random_state=0)
    with pytest.raises(ValueError):
        ransac_estimator.fit(X, y)


def test_ransac_max_trials():
    base_estimator = LinearRegression()

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, max_trials=0,
                                       random_state=0)
    with pytest.raises(ValueError):
        ransac_estimator.fit(X, y)

    # there is a 1e-9 chance it will take these many trials. No good
    # reason 1e-2 isn't enough, can still happen
    # 2 is the what ransac defines as min_samples = X.shape[1] + 1
    max_trials = _dynamic_max_trials(
        len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2)
    for i in range(50):
        ransac_estimator.set_params(min_samples=2, random_state=i)
        ransac_estimator.fit(X, y)
        assert ransac_estimator.n_trials_ < max_trials + 1


# stop_n_inliers=2 is satisfied immediately, so exactly one trial runs.
def test_ransac_stop_n_inliers():
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       stop_n_inliers=2,
                                       random_state=0)
    ransac_estimator.fit(X, y)

    assert ransac_estimator.n_trials_ == 1


# stop_score=0 is satisfied immediately, so exactly one trial runs.
def test_ransac_stop_score():
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, stop_score=0,
                                       random_state=0)
    ransac_estimator.fit(X, y)

    assert ransac_estimator.n_trials_ == 1


def test_ransac_score():
    # All-zero targets except two outliers at the front; score must be
    # perfect away from the outliers and below 1 on them.
    X = np.arange(100)[:, None]
    y = np.zeros((100, ))
    y[0] = 1
    y[1] = 100

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.5,
                                       random_state=0)
    ransac_estimator.fit(X, y)

    assert ransac_estimator.score(X[2:], y[2:]) == 1
    assert ransac_estimator.score(X[:2], y[:2]) < 1


def test_ransac_predict():
    X = np.arange(100)[:, None]
    y = np.zeros((100, ))
    y[0] = 1
    y[1] = 100

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.5,
                                       random_state=0)
    ransac_estimator.fit(X, y)

    assert_array_equal(ransac_estimator.predict(X), np.zeros(100))


def test_ransac_resid_thresh_no_inliers():
    # When residual_threshold=0.0 there are no inliers and a
    # ValueError with a message should be raised
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.0,
                                       random_state=0,
                                       max_trials=5)

    msg = ("RANSAC could not find a valid consensus set")
    with pytest.raises(ValueError, match=msg):
        ransac_estimator.fit(X, y)
    assert ransac_estimator.n_skips_no_inliers_ == 5
    assert ransac_estimator.n_skips_invalid_data_ == 0
    assert ransac_estimator.n_skips_invalid_model_ == 0


# Every trial is skipped as invalid data, so no consensus set exists.
def test_ransac_no_valid_data():
    def is_data_valid(X, y):
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_trials=5)

    msg = ("RANSAC could not find a valid consensus set")
    with pytest.raises(ValueError, match=msg):
        ransac_estimator.fit(X, y)
    assert ransac_estimator.n_skips_no_inliers_ == 0
    assert ransac_estimator.n_skips_invalid_data_ == 5
    assert ransac_estimator.n_skips_invalid_model_ == 0


# Every trial is skipped as an invalid model.
def test_ransac_no_valid_model():
    def is_model_valid(estimator, X, y):
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_model_valid=is_model_valid,
                                       max_trials=5)

    msg = ("RANSAC could not find a valid consensus set")
    with pytest.raises(ValueError, match=msg):
        ransac_estimator.fit(X, y)
    assert ransac_estimator.n_skips_no_inliers_ == 0
    assert ransac_estimator.n_skips_invalid_data_ == 0
    assert ransac_estimator.n_skips_invalid_model_ == 5


# max_skips < max_trials aborts the search before max_trials is reached.
def test_ransac_exceed_max_skips():
    def is_data_valid(X, y):
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_trials=5,
                                       max_skips=3)

    msg = ("RANSAC skipped more iterations than `max_skips`")
    with pytest.raises(ValueError, match=msg):
        ransac_estimator.fit(X, y)
    assert ransac_estimator.n_skips_no_inliers_ == 0
    assert ransac_estimator.n_skips_invalid_data_ == 4
    assert ransac_estimator.n_skips_invalid_model_ == 0


def test_ransac_warn_exceed_max_skips():
    # One good trial followed by rejections: fit succeeds but warns
    # about the skip budget being exhausted.
    global cause_skip
    cause_skip = False

    def is_data_valid(X, y):
        global cause_skip
        if not cause_skip:
            cause_skip = True
            return True
        else:
            return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_skips=3,
                                       max_trials=5)
    warning_message = (
        "RANSAC found a valid consensus set but exited "
        "early due to skipping more iterations than "
        "`max_skips`. See estimator attributes for "
        "diagnostics."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        ransac_estimator.fit(X, y)
    assert ransac_estimator.n_skips_no_inliers_ == 0
    assert ransac_estimator.n_skips_invalid_data_ == 4
    assert ransac_estimator.n_skips_invalid_model_ == 0


# Sparse inputs (COO/CSR/CSC) must give the same inlier mask as dense.
def test_ransac_sparse_coo():
    X_sparse = sparse.coo_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


def test_ransac_sparse_csr():
    X_sparse = sparse.csr_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


def test_ransac_sparse_csc():
    X_sparse = sparse.csc_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)


# Passing None for the estimator must default to LinearRegression.
def test_ransac_none_estimator():

    base_estimator = LinearRegression()

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       random_state=0)
    ransac_none_estimator = RANSACRegressor(None, min_samples=2,
                                            residual_threshold=5,
                                            random_state=0)

    ransac_estimator.fit(X, y)
    ransac_none_estimator.fit(X, y)

    assert_array_almost_equal(ransac_estimator.predict(X),
                              ransac_none_estimator.predict(X))


def test_ransac_min_n_samples():
    # min_samples as an absolute count, a fraction, or omitted must all
    # agree; invalid values must raise.
    base_estimator = LinearRegression()
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5,
                                        random_state=0)
    ransac_estimator2 = RANSACRegressor(base_estimator,
                                        min_samples=2. / X.shape[0],
                                        residual_threshold=5,
                                        random_state=0)
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
                                        residual_threshold=5,
                                        random_state=0)
    ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
                                        residual_threshold=5,
                                        random_state=0)
    ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
                                        residual_threshold=5,
                                        random_state=0)
    ransac_estimator6 = RANSACRegressor(base_estimator,
                                        residual_threshold=5,
                                        random_state=0)
    ransac_estimator7 = RANSACRegressor(base_estimator,
                                        min_samples=X.shape[0] + 1,
                                        residual_threshold=5,
                                        random_state=0)

    ransac_estimator1.fit(X, y)
    ransac_estimator2.fit(X, y)
    ransac_estimator5.fit(X, y)
    ransac_estimator6.fit(X, y)

    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator2.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator5.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator6.predict(X))

    with pytest.raises(ValueError):
        ransac_estimator3.fit(X, y)
    with pytest.raises(ValueError):
        ransac_estimator4.fit(X, y)
    with pytest.raises(ValueError):
        ransac_estimator7.fit(X, y)


def test_ransac_multi_dimensional_targets():

    base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # 3-D target values yyy = np.column_stack([y, y, y]) # Estimate parameters of corrupted data ransac_estimator.fit(X, yyy) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_residual_loss(): def loss_multi1(y_true, y_pred): return np.sum(np.abs(y_true - y_pred), axis=1) def loss_multi2(y_true, y_pred): return np.sum((y_true - y_pred) ** 2, axis=1) def loss_mono(y_true, y_pred): return np.abs(y_true - y_pred) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss=loss_multi1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss=loss_multi2) # multi-dimensional ransac_estimator0.fit(X, yyy) ransac_estimator1.fit(X, yyy) ransac_estimator2.fit(X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) ransac_estimator2.loss = loss_mono ransac_estimator2.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss="squared_error") ransac_estimator3.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_default_residual_threshold(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, 
random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. and Zisserman, A., 2004, # Multiple View Geometry in Computer Vision, Second Edition, # Cambridge University Press, ISBN: 0521540518 # e = 0%, min_samples = X assert _dynamic_max_trials(100, 100, 2, 0.99) == 1 # e = 5%, min_samples = 2 assert _dynamic_max_trials(95, 100, 2, 0.99) == 2 # e = 10%, min_samples = 2 assert _dynamic_max_trials(90, 100, 2, 0.99) == 3 # e = 30%, min_samples = 2 assert _dynamic_max_trials(70, 100, 2, 0.99) == 7 # e = 50%, min_samples = 2 assert _dynamic_max_trials(50, 100, 2, 0.99) == 17 # e = 5%, min_samples = 8 assert _dynamic_max_trials(95, 100, 8, 0.99) == 5 # e = 10%, min_samples = 8 assert _dynamic_max_trials(90, 100, 8, 0.99) == 9 # e = 30%, min_samples = 8 assert _dynamic_max_trials(70, 100, 8, 0.99) == 78 # e = 50%, min_samples = 8 assert _dynamic_max_trials(50, 100, 8, 0.99) == 1177 # e = 0%, min_samples = 10 assert _dynamic_max_trials(1, 100, 10, 0) == 0 assert _dynamic_max_trials(1, 100, 10, 1) == float('inf') base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=-0.1) with pytest.raises(ValueError): ransac_estimator.fit(X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=1.1) with pytest.raises(ValueError): ransac_estimator.fit(X, y) def test_ransac_fit_sample_weight(): ransac_estimator = RANSACRegressor(random_state=0) n_samples = y.shape[0] weights = np.ones(n_samples) ransac_estimator.fit(X, y, weights) # sanity check assert ransac_estimator.inlier_mask_.shape[0] == n_samples ref_inlier_mask = 
np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False # check that mask is correct assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) X_ = random_state.randint(0, 200, [10, 1]) y_ = np.ndarray.flatten(0.2 * X_ + 2) sample_weight = random_state.randint(0, 10, 10) outlier_X = random_state.randint(0, 1000, [1, 1]) outlier_weight = random_state.randint(0, 10, 1) outlier_y = random_state.randint(-1000, 0, 1) X_flat = np.append(np.repeat(X_, sample_weight, axis=0), np.repeat(outlier_X, outlier_weight, axis=0), axis=0) y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0), np.repeat(outlier_y, outlier_weight, axis=0), axis=0)) ransac_estimator.fit(X_flat, y_flat) ref_coef_ = ransac_estimator.estimator_.coef_ sample_weight = np.append(sample_weight, outlier_weight) X_ = np.append(X_, outlier_X, axis=0) y_ = np.append(y_, outlier_y) ransac_estimator.fit(X_, y_, sample_weight) assert_allclose(ransac_estimator.estimator_.coef_, ref_coef_) # check that if base_estimator.fit doesn't support # sample_weight, raises error base_estimator = OrthogonalMatchingPursuit() ransac_estimator = RANSACRegressor(base_estimator) with pytest.raises(ValueError): ransac_estimator.fit(X, y, weights) def test_ransac_final_model_fit_sample_weight(): X, y = make_regression(n_samples=1000, random_state=10) rng = check_random_state(42) sample_weight = rng.randint(1, 4, size=y.shape[0]) sample_weight = sample_weight / sample_weight.sum() ransac = RANSACRegressor(base_estimator=LinearRegression(), random_state=0) ransac.fit(X, y, sample_weight=sample_weight) final_model = LinearRegression() mask_samples = ransac.inlier_mask_ final_model.fit( X[mask_samples], y[mask_samples], sample_weight=sample_weight[mask_samples] ) 
assert_allclose(ransac.estimator_.coef_, final_model.coef_, atol=1e-12) # TODO: Remove in v1.2 @pytest.mark.parametrize("old_loss, new_loss", [ ("absolute_loss", "squared_error"), ("squared_loss", "absolute_error"), ]) def test_loss_deprecated(old_loss, new_loss): est1 = RANSACRegressor(loss=old_loss, random_state=0) with pytest.warns(FutureWarning, match=f"The loss '{old_loss}' was deprecated"): est1.fit(X, y) est2 = RANSACRegressor(loss=new_loss, random_state=0) est2.fit(X, y) assert_allclose(est1.predict(X), est2.predict(X))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING

import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

class VirtualNetworkTapsOperations(object):
    """VirtualNetworkTapsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_08_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        tap_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request of the delete long-running operation.

        Returns ``None``; the LRO outcome is tracked by :meth:`begin_delete`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'tapName': self._serialize.url("tap_name", tap_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 = accepted (polling continues); 204 = already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        tap_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified virtual network tap.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param tap_name: The name of the virtual network tap.
        :type tap_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda keeps the raw pipeline response so the poller can
            # inspect status/headers; the user's cls is applied on completion.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                tap_name=tap_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'tapName': self._serialize.url("tap_name", tap_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'},  path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        tap_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VirtualNetworkTap"
        """Gets information about the specified virtual network tap.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param tap_name: The name of virtual network tap.
        :type tap_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualNetworkTap, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_08_01.models.VirtualNetworkTap
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTap"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'tapName': self._serialize.url("tap_name", tap_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        tap_name,  # type: str
        parameters,  # type: "_models.VirtualNetworkTap"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VirtualNetworkTap"
        """Send the initial PUT request of the create-or-update long-running operation.

        Returns the (possibly provisional) deserialized VirtualNetworkTap;
        polling to completion is handled by :meth:`begin_create_or_update`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTap"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'tapName': self._serialize.url("tap_name", tap_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'VirtualNetworkTap')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing resource; 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        tap_name,  # type: str
        parameters,  # type: "_models.VirtualNetworkTap"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualNetworkTap"]
        """Creates or updates a Virtual Network Tap.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param tap_name: The name of the virtual network tap.
        :type tap_name: str
        :param parameters: Parameters supplied to the create or update virtual network tap operation.
        :type parameters: ~azure.mgmt.network.v2019_08_01.models.VirtualNetworkTap
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualNetworkTap or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.VirtualNetworkTap]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTap"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Keep the raw pipeline response for the poller; see begin_delete.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                tap_name=tap_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'tapName': self._serialize.url("tap_name", tap_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'},  path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore

    def _update_tags_initial(
        self,
        resource_group_name,  # type: str
        tap_name,  # type: str
        tap_parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VirtualNetworkTap"
        """Send the initial PATCH request of the update-tags long-running operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTap"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'tapName': self._serialize.url("tap_name", tap_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(tap_parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore

    def begin_update_tags(
        self,
        resource_group_name,  # type: str
        tap_name,  # type: str
        tap_parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualNetworkTap"]
        """Updates an VirtualNetworkTap tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param tap_name: The name of the tap.
        :type tap_name: str
        :param tap_parameters: Parameters supplied to update VirtualNetworkTap tags.
        :type tap_parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualNetworkTap or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.VirtualNetworkTap]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTap"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Keep the raw pipeline response for the poller; see begin_delete.
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                tap_name=tap_name,
                tap_parameters=tap_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'tapName': self._serialize.url("tap_name", tap_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay,  path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore

    def list_all(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.VirtualNetworkTapListResult"]
        """Gets all the VirtualNetworkTaps in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.VirtualNetworkTapListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTapListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # next_link is an absolute URL returned by the service; it already
            # carries the query string, so only headers are re-applied.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworkTaps'}  # type: ignore

    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.VirtualNetworkTapListResult"]
        """Gets all the VirtualNetworkTaps in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.VirtualNetworkTapListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTapListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # See list_all.prepare_request: next_link already embeds the query.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps'}  # type: ignore
import theano
import theano.tensor as T
floatX = theano.config.floatX

import numpy as np
import time, datetime
import sys
import logging
internal_logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)

from mozi.log import Log
from mozi.utils.theano_utils import shared_zeros
from mozi.utils.utils import split_list, generate_shared_list, merge_lists, \
    get_shared_values, is_shared_var
from mozi.utils.check_memory import get_mem_usage
from mozi.utils.progbar import Progbar


class TrainObject():
    """Drives training of a mozi model: compiles the Theano train/test
    functions (setup) and runs the epoch/block training loop (run) until
    the stop criteria are met.

    NOTE(review): this module is Python 2 only — it relies on `sys.maxint`,
    bare `print` statements and `function.func_name`; confirm the target
    interpreter before reuse.
    """

    def __init__(self, model, dataset, train_cost, valid_cost, learning_method, stop_criteria, log=None):
        """Store the training configuration.

        model: a mozi model exposing layers, input_var/output_var and
            train_fprop/test_fprop.
        dataset: iterable of blocks, each providing get_train()/get_valid().
        train_cost, valid_cost: callables (y, y_pred) -> symbolic cost;
            valid_cost also serves as the early-stopping criterion.
        learning_method: provides update(delta, gparam) -> list of updates.
        stop_criteria: dict with keys 'max_epoch', 'percent_decrease',
            'epoch_look_back'.
        log: optional mozi Log; a default one is created when None.
        """
        self.model = model
        self.dataset = dataset
        self.train_cost = train_cost
        self.valid_cost = valid_cost
        self.learning_method = learning_method
        self.stop_criteria = stop_criteria
        self.log = log
        if self.log is None:
            # use default Log setting
            self.log = Log(logger=internal_logger)
        elif self.log.save_to_database:
            self.log.print_records()
            self.log.info('\n')

    def setup(self):
        """Build the parameter/delta lists and compile the Theano
        training and testing functions (self.training / self.testing)."""
        self.log.info( '..begin setting up train object')

        #===================[ build params and deltas list ]==================#
        params = []
        deltas = []
        for layer in self.model.layers:
            for param in layer.params:
                # checked that the param to be updated is shared variable
                if is_shared_var(param):
                    params += [param]
                    # one zero-initialised accumulator per parameter, matching
                    # its current shape
                    deltas += [shared_zeros(shape=param.shape.eval())]

        #=====================[ training params updates ]=====================#
        self.log.info("..update params: " + str(params))
        train_y_pred, train_layers_stats = self.model.train_fprop(self.model.input_var)
        train_cost = self.train_cost(self.model.output_var, train_y_pred).astype(floatX)
        train_updates = []
        gparams = T.grad(train_cost, params)
        for delta, param, gparam in zip(deltas, params, gparams):
            # learning_method fills `delta` from the gradient; the parameter
            # is then moved by that delta
            train_updates += self.learning_method.update(delta, gparam)
            train_updates += [(param, param+delta)]

        #----[ append updates of stats from each layer to train updates ]-----#
        self.train_stats_names, train_stats_vars = split_list(train_layers_stats)
        train_stats_vars = [var.astype(floatX) for var in train_stats_vars]
        self.train_stats_shared = generate_shared_list(train_stats_vars)
        train_stats_updates = merge_lists(self.train_stats_shared, train_stats_vars)
        train_updates += train_stats_updates

        #-------------------------[ train functions ]-------------------------#
        self.log.info('..begin compiling functions')
        self.training = theano.function(inputs=[self.model.input_var, self.model.output_var],
                                        outputs=train_cost,
                                        updates=train_updates,
                                        on_unused_input='warn',
                                        allow_input_downcast=True)
        self.log.info('..training function compiled')

        #======================[ testing params updates ]=====================#
        test_y_pred, test_layers_stats = self.model.test_fprop(self.model.input_var)

        #-----[ append updates of stats from each layer to test updates ]-----#
        self.test_stats_names, test_stats_vars = split_list(test_layers_stats)
        test_stats_vars = [var.astype(floatX) for var in test_stats_vars]
        self.test_stats_shared = generate_shared_list(test_stats_vars)
        test_stats_updates = merge_lists(self.test_stats_shared, test_stats_vars)

        #-------------------------[ test functions ]--------------------------#
        # testing returns both the early-stopping error (valid_cost) and the
        # training cost evaluated on the validation set
        test_stopping_error = self.valid_cost(self.model.output_var, test_y_pred).astype(floatX)
        test_cost = self.train_cost(self.model.output_var, test_y_pred).astype(floatX)
        self.testing = theano.function(inputs=[self.model.input_var, self.model.output_var],
                                       outputs=(test_stopping_error, test_cost),
                                       updates=test_stats_updates,
                                       on_unused_input='warn',
                                       allow_input_downcast=True)
        self.log.info('..testing function compiled')

    def run(self):
        """Run the training loop until continue_learning() says stop.

        Per epoch: iterate dataset blocks, accumulate example-weighted train
        and validation costs/stats, checkpoint the model on a new best
        validation error, and log/persist per-epoch results.
        """
        # sys.maxint acts as +infinity sentinels for "no result yet"
        # (Python 2 only)
        best_valid_error = float(sys.maxint)
        valid_error = float(sys.maxint)
        train_cost = float(sys.maxint)
        valid_cost = float(sys.maxint)

        train_stats_values = []
        valid_stats_values = []

        epoch = 0
        error_dcr = 0
        self.best_epoch_last_update = 0
        self.best_valid_last_update = float(sys.maxint)

        train_stats_names = ['train_' + name for name in self.train_stats_names]
        valid_stats_names = ['valid_' + name for name in self.test_stats_names]

        job_start = time.time()

        while (self.continue_learning(epoch, error_dcr, best_valid_error)):
            if epoch > 0:
                self.log.info("best_epoch_last_update: %d"%self.best_epoch_last_update)
                self.log.info("valid_error_decrease: %f"%error_dcr)
                self.log.info("best_valid_last_update: %f"%self.best_valid_last_update)
                self.log.info("========[ End of Epoch ]========\n\n")

            epoch += 1

            start_time = time.time()

            # per-epoch accumulators, weighted by minibatch size below
            num_train_examples = 0
            total_train_cost = 0.
            train_stats_values = np.zeros(len(train_stats_names), dtype=floatX)

            num_valid_examples = 0
            total_valid_cost = 0.
            total_valid_stopping_cost = 0.
            valid_stats_values = np.zeros(len(valid_stats_names), dtype=floatX)

            blk = 0
            for block in self.dataset:
                block_time = time.time()
                blk += 1
                train_set = block.get_train()
                valid_set = block.get_valid()

                #====================[ Training Progress ]====================#
                if train_set.dataset_size > 0:
                    self.log.info('..training '+ self.dataset.__class__.__name__ + ' block %s/%s'%(blk, self.dataset.nblocks))
                    progbar = Progbar(target=train_set.dataset_size)
                    # idx is a minibatch of example indices; costs are
                    # weighted by minibatch length
                    for idx in train_set:
                        cost = self.training(train_set.X[idx], train_set.y[idx])
                        total_train_cost += cost * len(idx)
                        num_train_examples += len(idx)
                        train_stats_values += len(idx) * get_shared_values(self.train_stats_shared)
                        progbar.update(num_train_examples)
                    print

                    #-------[ Update train best cost and error values ]-------#
                    train_cost = total_train_cost / num_train_examples
                    train_stats_values /= num_train_examples

                #===================[ Validating Progress ]===================#
                if valid_set.dataset_size > 0:
                    self.log.info('..validating ' + self.dataset.__class__.__name__ + ' block %s/%s'%(blk, self.dataset.nblocks))
                    progbar = Progbar(target=valid_set.dataset_size)
                    for idx in valid_set:
                        stopping_cost, cost = self.testing(valid_set.X[idx], valid_set.y[idx])
                        total_valid_cost += cost * len(idx)
                        total_valid_stopping_cost += stopping_cost * len(idx)
                        num_valid_examples += len(idx)
                        valid_stats_values += len(idx) * get_shared_values(self.test_stats_shared)
                        progbar.update(num_valid_examples)
                    print

                    #-------[ Update valid best cost and error values ]-------#
                    valid_error = total_valid_stopping_cost / num_valid_examples
                    valid_cost = total_valid_cost / num_valid_examples
                    valid_stats_values /= num_valid_examples

                    # checkpoint whenever validation error improves
                    if valid_error < best_valid_error:
                        best_valid_error = valid_error
                        self.log.info('..best validation error so far')
                        if self.log.save_model:
                            self.log._save_model(self.model)
                            self.log.info('..model saved')

                    # decrease relative to the last stop-criteria update,
                    # consumed by continue_learning()
                    if valid_error < self.best_valid_last_update:
                        error_dcr = self.best_valid_last_update - valid_error
                    else:
                        error_dcr = 0

                self.log.info('block time: %0.2fs'%(time.time()-block_time))
                self.log.info(get_mem_usage())

            #==============[ save to database, save epoch error]==============#
            if self.log.save_to_database:
                self.log._save_to_database(epoch, train_cost, valid_cost, best_valid_error)
                self.log.info('..sent to database: %s:%s' % (self.log.save_to_database['name'],
                              self.log.experiment_name))

            if self.log.save_epoch_error:
                self.log._save_epoch_error(epoch, valid_error)
                self.log.info('..epoch error saved')

            end_time = time.time()

            #=====================[ log outputs to file ]=====================#
            merged_train = merge_lists(train_stats_names, train_stats_values)
            merged_valid = merge_lists(valid_stats_names, valid_stats_values)

            # func_name is the Python 2 spelling of __name__
            outputs = [('epoch', epoch),
                       ('runtime(s)', int(end_time-start_time)),
                       ('train_' + self.train_cost.func_name, train_cost),
                       ('valid_' + self.train_cost.func_name, valid_cost),
                       ('valid_' + self.valid_cost.func_name, valid_error),
                       ('best_valid_' + self.valid_cost.func_name, best_valid_error)]

            outputs += merged_train + merged_valid
            self.log._log_outputs(outputs)

        job_end = time.time()
        self.log.info('Job Completed on %s'%time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(job_end)))
        ttl_time = int(job_end - job_start)
        dt = datetime.timedelta(seconds=ttl_time)
        self.log.info('Total Time Taken: %s'%str(dt))
        self.log.info("========[ End of Job ]========\n\n")

    def continue_learning(self, epoch, error_dcr, best_valid_error):
        """Return True while training should continue.

        Stops after max_epoch, or when the relative validation-error
        decrease has stayed below 'percent_decrease' for more than
        'epoch_look_back' epochs. A sufficiently large decrease resets
        the look-back window and records the new best epoch/error.
        """
        if epoch > self.stop_criteria['max_epoch']:
            return False
        elif self.stop_criteria['percent_decrease'] is None or \
            self.stop_criteria['epoch_look_back'] is None:
            # early stopping disabled: only max_epoch applies
            return True
        elif np.abs(float(error_dcr) / self.best_valid_last_update) \
            >= self.stop_criteria['percent_decrease']:
            # significant improvement: reset the look-back window
            self.best_valid_last_update = best_valid_error
            self.best_epoch_last_update = epoch
            return True
        elif epoch - self.best_epoch_last_update > \
            self.stop_criteria['epoch_look_back']:
            return False
        else:
            return True
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


from conary import versions
from conary.lib import log
from conary.deps.deps import parseFlavor, parseDep, overrideFlavor

from rmake.lib.apiutils import freeze, thaw
from rmake.lib import logger
from rmake.worker import resolver
from rmake.build import dephandler

from rmake_test import rmakehelp


# Integration tests for rmake's dependency resolver: freeze/thaw of
# ResolveResult, and how built troves, resolveTroves and the
# installLabelPath interact during build-requirement resolution.
# All tests use live test repositories provided by rmakehelp.RmakeHelper.
class ResolverTest(rmakehelp.RmakeHelper):
    # ResolveResult round-trips through freeze/thaw for the resolved,
    # missing-buildreq and missing-dep cases.
    def testResolveResult(self):
        trv = self.addComponent('foo:runtime', '1.0', 'ssl')
        tup = trv.getNameVersionFlavor()
        # job tuple: (name, (oldVer, oldFla), (newVer, newFla), isAbsolute)
        job = (tup[0], (None, None), (tup[1], tup[2]), False)
        r = resolver.ResolveResult()
        r.troveResolved([job], [], [])
        r2 = thaw('ResolveResult', freeze('ResolveResult', r))
        assert(r2.getBuildReqs() == [ job ])
        assert(r2.success)
        assert(not r2.inCycle)

        r = resolver.ResolveResult(inCycle=True)
        r.troveMissingBuildReqs(True, [('foo', None, parseFlavor('ssl'))])
        r2 = thaw('ResolveResult', freeze('ResolveResult', r))
        assert(not r2.hasMissingDeps())
        assert(r2.hasMissingBuildReqs())
        # NOTE: a None version freezes to the empty string
        assert(r2.getMissingBuildReqs() ==
               [(True, ('foo', '', parseFlavor('ssl')))])
        assert(not r2.success)
        assert(r2.inCycle)

        r = resolver.ResolveResult(inCycle=True)
        r.troveMissingDependencies(True,
            [(trv.getNameVersionFlavor(), parseDep('trove: foo trove: bar'))])
        r2 = thaw('ResolveResult', freeze('ResolveResult', r))
        assert(r.getMissingDeps() == r2.getMissingDeps())
        assert(r2.hasMissingDeps())
        assert(not r2.success)
        assert(r2.inCycle)

    # A built trove with the wrong architecture must not shadow the
    # correctly-flavored trove in the repository (findTroves path).
    def testFindWrongArchInBuiltTroves(self):
        # wrong flavor in builtTroves list
        self.buildCfg.flavor = [parseFlavor('is:x86')]
        self.openRmakeRepository()
        trv = self.addComponent('foo:runtime',
                                '/localhost@rpl:linux//rmakehost@rpl:linux/1.0',
                                'is:x86_64')
        builtTroves = [trv.getNameVersionFlavor()]
        # right flavor sitting in the repos
        self.addComponent('foo:runtime', '1.0', 'is:x86')
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves, [])
        self.logFilter.add()
        res = resolver.DependencyResolver(log, self.openRepository())
        result = res.resolve(resolveJob)
        # flavor should be the x86 one.
        assert(result.success
               and str(list(result.getBuildReqs())[0][2][1]) == 'is: x86')
        #####
        # now let's try when it's in the resolveTroves list
        self.buildCfg.resolveTroveTups = [builtTroves]
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, [], [])
        result = res.resolve(resolveJob)
        assert(result.success
               and str(list(result.getBuildReqs())[0][2][1]) == 'is: x86')

    # Same wrong-arch scenario but via dependency resolution (bar:runtime
    # requires foo:runtime); crossTroves must be ignored for non-cross
    # requirements.
    def testResolveWrongArchInBuiltTroves(self):
        # wrong flavor in builtTroves list
        self.buildCfg.flavor = [parseFlavor('is:x86')]
        self.openRmakeRepository()
        trv = self.addComponent('foo:runtime',
                                '/localhost@rpl:linux//rmakehost@rpl:linux/1.0',
                                'is:x86_64')
        builtTroves = [trv.getNameVersionFlavor()]
        # right flavor sitting in the repos
        self.addComponent('foo:runtime', '1.0', 'is:x86')
        # there's another one on a branch.  We'll put that one in our
        # crossTroves list which shouldn't be used because we're not looking
        # for crossRequirements
        trv = self.addComponent('foo:runtime',
                                '/localhost@rpl:linux//rmakehost@rpl:linux/1.0',
                                'ssl is:x86')
        crossTroves = [trv.getNameVersionFlavor()]
        self.addComponent('bar:runtime', '1.0', 'is:x86',
                          requires='trove:foo:runtime')
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['bar:runtime'])
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg,
                                           builtTroves, crossTroves)
        self.logFilter.add()
        res = resolver.DependencyResolver(log, self.openRepository())
        result = res.resolve(resolveJob)
        # flavor should be the x86 one.
        assert(result.success)
        flavors = set([ str(x[2][1]) for x in result.getBuildReqs() ])
        assert(flavors == set(['is: x86']))
        #####
        # now let's try when it's in the resolveTroves list
        self.buildCfg.resolveTroveTups = [builtTroves]
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, [], crossTroves)
        result = res.resolve(resolveJob)
        assert(result.success)
        flavors = set([ str(x[2][1]) for x in result.getBuildReqs() ])
        assert(flavors == set(['is: x86']))

    def testResolveCrossDependencies(self):
        self.openRmakeRepository()
        # resolving cross root dependencies have a couple of odd features about
        # them:
        # 1. the flavor is !cross even if the build flavor is cross
        # 2. the flavor moves the target: flavor is to the is: spot
        # 3. file: dependencies are ignored
        # 4. it includes troves in crossTroves list.
        # We test some of these here.
        self.addComponent('foo:runtime', '1.0', 'cross is:x86_64')
        self.addComponent('foo:runtime', '1.0', '!cross is:x86_64')
        self.addComponent('foo:runtime', '1.0', 'cross is:x86')
        self.addComponent('foo:runtime', '1.0', '!cross is:x86')
        self.addComponent('bar:runtime', '1.0', 'cross is:x86_64',
                          requires='trove:foo:runtime file: /tmp/blah')
        self.addComponent('bar:runtime', '1.0', '!cross is:x86_64',
                          requires='trove:foo:runtime file: /tmp/blah')
        self.addComponent('bar:runtime', '1.0', 'cross is:x86',
                          requires='trove:foo:runtime file: /tmp/blah')
        self.addComponent('bar:runtime', '1.0', '!cross is:x86',
                          requires='trove:foo:runtime file: /tmp/blah')
        self.addComponent('blah:runtime', '1.0', 'cross is:x86',
                          provides='file: /tmp/blah')
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, trv.getName(), trv.getVersion(),
                                parseFlavor('cross is:x86 target:x86_64'))
        bt.setBuildRequirements(['bar:runtime'])
        bt.setCrossRequirements(['bar:runtime'])
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, [], [])
        self.buildCfg.flavor = [overrideFlavor(self.buildCfg.buildFlavor,
                                parseFlavor('cross is:x86 target:x86_64'))]
        res = resolver.DependencyResolver(log, self.openRepository())
        self.logFilter.add()
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReqNames = set([ x[0] for x in result.getBuildReqs()])
        buildReqFlavors = set([ str(x[2][1]) for x in result.getBuildReqs()])
        # blah:runtime satisfies the file: dep on the build side only
        assert(buildReqNames ==
               set(['bar:runtime', 'foo:runtime', 'blah:runtime']))
        crossReqNames = set([ x[0] for x in result.getCrossReqs()])
        crossReqFlavors = set([ str(x[2][1]) for x in result.getCrossReqs()])
        assert(crossReqNames == set(['bar:runtime', 'foo:runtime']))
        # cross reqs: !cross, and target:x86_64 moved into the is: slot
        assert(crossReqFlavors == set(['!cross is: x86_64']))
        assert(buildReqFlavors == set(['cross is: x86']))

    # When foo:runtime comes from builtTroves and requires foo:lib, the
    # matching foo:lib must come from the same source, not resolveTroves.
    def testIntraTroveDepsInBuiltTroves(self):
        self.openRmakeRepository()
        builtFooRun = self.addComponent('foo:runtime',
                        '/localhost@rpl:linux//rmakehost@rpl:linux/1.0',
                        'is:x86', requires='trove:foo:lib')
        builtFooLib = self.addComponent('foo:lib',
                        '/localhost@rpl:linux//rmakehost@rpl:linux/1.0',
                        'is:x86')
        fooLib = self.addComponent('foo:lib', '1.0', 'is:x86')
        builtTroves = [builtFooRun.getNameVersionFlavor(),
                       builtFooLib.getNameVersionFlavor()]
        resolveTroves = [ fooLib.getNameVersionFlavor()]
        self.buildCfg.resolveTroveTups = [resolveTroves]
        self.buildCfg.resolveTrovesOnly = True
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves, [])
        self.logFilter.add()
        res = resolver.DependencyResolver(log, self.openRepository())
        # make sure that we don't grab the foo:lib from the resolveTroves
        # list even though it's a better match (intraTroveDeps should stop that)
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReqs = result.getBuildReqs()
        # one distinct version: both components from the same place
        assert(len(set([x[2][0] for x in buildReqs])) == 1)
        #####
        # now let's try when it's in the resolveTroves list
        self.buildCfg.resolveTroveTups = [builtTroves]
        self.buildCfg.resolveTrovesOnly = False
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, [], [])
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReqs = result.getBuildReqs()
        assert(len(set([x[2][0] for x in buildReqs])) == 1)

    # Same cohesion property when the link between components is an
    # ordinary soname dependency rather than an intra-trove trove: dep.
    def testNonIntraTroveDepsInBuiltTroves(self):
        self.openRmakeRepository()
        depStr = 'soname: ELF32/foo.so.1(SysV)'
        builtFooRun = self.addComponent('foo:runtime',
                        '/localhost@rpl:linux//rmakehost@rpl:linux/1.0',
                        'is:x86', requires=depStr)
        builtFooLib = self.addComponent('foo:lib',
                        '/localhost@rpl:linux//rmakehost@rpl:linux/1.0',
                        'is:x86', provides=depStr)
        fooLib = self.addComponent('foo:lib', '1.0', 'is:x86',
                                   provides=depStr)
        builtTroves = [builtFooRun.getNameVersionFlavor(),
                       builtFooLib.getNameVersionFlavor()]
        resolveTroves = [ fooLib.getNameVersionFlavor()]
        self.buildCfg.resolveTroveTups = [resolveTroves]
        self.buildCfg.resolveTrovesOnly = True
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves, [])
        self.logFilter.add()
        res = resolver.DependencyResolver(log, self.openRepository())
        # make sure that we don't grab the foo:lib from the resolveTroves
        # list even though it's a better match (intraTroveDeps should stop that)
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReqs = list(result.getBuildReqs())
        # assert both foo:run and foo:lib have the same version
        assert(str(buildReqs[0][2][0].trailingLabel())
               == 'rmakehost@rpl:linux')
        assert(len(set([x[2][0] for x in buildReqs])) == 1)
        #####
        # now let's try when it's in the resolveTroves list
        self.buildCfg.resolveTroveTups = [builtTroves]
        self.buildCfg.resolveTrovesOnly = False
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, [], [])
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReqs = list(result.getBuildReqs())
        # assert both foo:run and foo:lib have the same version
        assert(str(buildReqs[0][2][0].trailingLabel())
               == 'rmakehost@rpl:linux')
        assert(len(set([x[2][0] for x in buildReqs])) == 1)

    # Built troves win over resolveTroves/repository matches in findTroves.
    def testFindTrovesPrefersBuiltTroves(self):
        self.openRmakeRepository()
        depStr = 'soname: ELF32/foo.so.1(SysV)'
        builtFooRun = self.addComponent('foo:runtime',
                '/localhost@rpl:linux//branch//rmakehost@rpl:branch/1.0',
                'is:x86')
        fooRun = self.addComponent('foo:runtime', '1.0', 'is:x86')
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        self.buildCfg.installLabelPath = [
            versions.Label('localhost@rpl:branch'),
            versions.Label('localhost@rpl:linux') ]
        builtTroves = [builtFooRun.getNameVersionFlavor()]
        resolveTroves = [ fooRun.getNameVersionFlavor()]
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves)
        self.logFilter.add()
        res = resolver.DependencyResolver(log, self.openRepository())
        self.buildCfg.resolveTroveTups = [resolveTroves]
        self.buildCfg.resolveTrovesOnly = False
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReq, = result.getBuildReqs()
        assert(str(buildReq[2][0].trailingLabel()) == 'rmakehost@rpl:branch')

    # resolveTroves win over a plain repository match in findTroves.
    def testFindTrovesPrefersResolveTroves(self):
        self.openRmakeRepository()
        # prefer resolveTroves to the repository when using findTroves.
        resolveFooRun = self.addComponent('foo:runtime',
                                          '/localhost@rpl:branch/1.0',
                                          'is:x86')
        fooRun = self.addComponent('foo:runtime', '1.0', 'is:x86')
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        resolveTroves = [ resolveFooRun.getNameVersionFlavor()]
        self.buildCfg.resolveTroveTups = [resolveTroves]
        self.buildCfg.resolveTrovesOnly = False
        builtTroves = []
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves)
        self.logFilter.add()
        res = resolver.DependencyResolver(log, self.openRepository())
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReq, = result.getBuildReqs()
        assert(str(buildReq[2][0].trailingLabel()) == 'localhost@rpl:branch')

    # installLabelPath ordering decides among resolveTroves candidates;
    # built troves still override either way.
    def testFindTrovesPrefersResolveTrovesOnILP(self):
        self.openRmakeRepository()
        resolveFooRun = self.addComponent('foo:runtime',
                                          '/localhost@rpl:linux/1.0',
                                          'is:x86')
        resolveFooRun2 = self.addComponent('foo:runtime',
                                           '/localhost@rpl:branch/1.0',
                                           'is:x86')
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        resolveTroves = [ x.getNameVersionFlavor()
                          for x in (resolveFooRun, resolveFooRun2) ]
        self.buildCfg.resolveTroveTups = [resolveTroves]
        self.buildCfg.resolveTrovesOnly = True
        builtTroves = []
        self.logFilter.add()
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves)
        res = resolver.DependencyResolver(log, self.openRepository())
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReq, = result.getBuildReqs()
        # default ILP: :linux wins
        assert(str(buildReq[2][0].trailingLabel()) == 'localhost@rpl:linux')
        self.buildCfg.installLabelPath = [
            versions.Label('localhost@rpl:branch'),
            versions.Label('localhost@rpl:linux')]
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReq, = result.getBuildReqs()
        # :branch listed first now wins
        assert(str(buildReq[2][0].trailingLabel()) == 'localhost@rpl:branch')
        builtFooRun = self.addComponent('foo:runtime',
                '/localhost@rpl:linux//branch//rmakehost@rpl:branch/1.0',
                'is:x86')
        builtTroves = [ builtFooRun.getNameVersionFlavor() ]
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves)
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReq, = result.getBuildReqs()
        # built trove beats both resolveTroves regardless of ILP order
        assert(str(buildReq[2][0].trailingLabel()) == 'rmakehost@rpl:branch')
        self.buildCfg.installLabelPath = [
            versions.Label('localhost@rpl:linux'),
            versions.Label('localhost@rpl:branch')]
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReq, = result.getBuildReqs()
        assert(str(buildReq[2][0].trailingLabel()) == 'rmakehost@rpl:branch')

    # Built troves are usable even when their label is not on the ILP.
    def testFindTrovesWillUseBuiltTroveFromArbitraryLabel(self):
        self.openRmakeRepository()
        self.openRepository()
        builtFooRun = self.addComponent('foo:runtime',
                '/localhost@rpl:linux//branch//rmakehost@local:branch/1.0',
                'is:x86')
        resolveBarRun = self.addComponent('bar:runtime=1.0[is:x86]')
        resolveTroves = [ x.getNameVersionFlavor()
                          for x in (resolveBarRun,) ]
        builtTroves = [ builtFooRun.getNameVersionFlavor() ]
        self.buildCfg.installLabelPath = [
            versions.Label('localhost@rpl:linux')]
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        self.buildCfg.resolveTroveTups = [resolveTroves]
        self.buildCfg.resolveTrovesOnly = False
        res = resolver.DependencyResolver(log, self.openRepository())
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves)
        self.logFilter.add()
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReq, = result.getBuildReqs()
        assert(str(buildReq[2][0].trailingLabel()) == 'rmakehost@local:branch')

    # With multiple resolveTroves buckets, the earlier bucket wins.
    def testFindTrovesWillUseTroveFoundEarlierInResolveTroves(self):
        self.openRepository()
        fooResolve1 = self.addComponent(
            'foo:runtime=/localhost@rpl:linux-devel/1-1-1')
        fooResolve2 = self.addComponent(
            'foo:runtime=/localhost@rpl:linux/1-1-1')
        resolveTroves = [ [x.getNameVersionFlavor()]
                          for x in (fooResolve1, fooResolve2,) ]
        builtTroves = []
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        self.buildCfg.resolveTroveTups = resolveTroves
        self.buildCfg.resolveTrovesOnly = False
        res = resolver.DependencyResolver(log, self.openRepository())
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves)
        self.logFilter.add()
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReq, = result.getBuildReqs()
        assert(str(buildReq[2][0].trailingLabel())
               == 'localhost@rpl:linux-devel')

    # The flavor-preference list picks the x86_64 built trove even though
    # the two built flavors carry different (timestamped) versions.
    def testFindTrovesHandlesDifferentVersionsForX86AndX86_64(self):
        self.openRmakeRepository()
        self.openRepository()
        builtFooRun = self.addComponent('foo:runtime',
                '/localhost@rpl:linux//rmakehost@local:branch/1.0-1-0.1',
                'is:x86')
        builtFooRun2 = self.addComponent('foo:runtime',
                '/localhost@rpl:linux//rmakehost@local:branch/1.0-1-0.2',
                'is:x86_64')
        resolveFooRun = self.addComponent('foo:runtime',
                '/localhost@rpl:linux/1.0-1', 'is:x86_64')
        builtTroves = [builtFooRun.getNameVersionFlavor(),
                       builtFooRun2.getNameVersionFlavor()]
        resolveTroves = [ resolveFooRun.getNameVersionFlavor()]
        self.buildCfg.resolveTroveTups = [resolveTroves]
        self.buildCfg.resolveTrovesOnly = True
        self.buildCfg.flavor = [parseFlavor('is:x86_64'),
                                parseFlavor('is:x86 x86_64') ]
        trv = self.addComponent('bam:source')
        bt = self.newBuildTrove(1, *trv.getNameVersionFlavor())
        bt.setBuildRequirements(['foo:runtime'])
        resolveJob = dephandler.ResolveJob(bt, self.buildCfg, builtTroves, [])
        self.logFilter.add()
        res = resolver.DependencyResolver(log, self.openRepository())
        # make sure that we don't grab the foo:lib from the resolveTroves
        # list even though it's a better match (intraTroveDeps should stop that)
        result = res.resolve(resolveJob)
        assert(result.success)
        buildReqs = result.getBuildReqs()
        buildReq, = buildReqs
        assert((buildReq[0], buildReq[2][0], buildReq[2][1])
               == builtFooRun2.getNameVersionFlavor())
# coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for online metrics."""

from absl.testing import parameterized
import numpy as np
from rl_reliability_metrics.metrics import metrics_online

import unittest


# Curves are 2-D arrays [timepoints; values]. Each parameterized case
# supplies metric configuration plus the expected numeric output.
class MetricsOnlineTest(parameterized.TestCase, unittest.TestCase):

  @parameterized.parameters(
      ([0, 1], None, None, [1.41421356237, 2.12132034356]),
      ([0, 1], None, 1, [1.41421356237, 2.12132034356]),
      ([0, 1], 0.5, 0.5, [2.9954294688643497, 4.564952035367936]),
      ([0, 1], None, 'curve_range', [1.414213562 / 1.425,
                                     2.121320343 / 1.425]),
  )
  def testCorrectStddevAcrossRuns(self, timepoints, lowpass_thresh, baseline,
                                  expected):
    curves = [
        np.array([[-1, 0, 1], [1., 1., 1.]]),
        np.array([[-1, 0, 1, 2], [2., 3., 4., 5.]])
    ]
    metric = metrics_online.StddevAcrossRuns(
        lowpass_thresh=lowpass_thresh, eval_points=timepoints,
        baseline=baseline)
    result = metric(curves)
    np.testing.assert_allclose(result, expected)

  @parameterized.parameters(
      ([0, 1], None, None, [1, 1.5]),
      ([0, 1], None, 2, [0.5, 0.75]),
  )
  def testCorrectIqrAcrossRuns(self, timepoints, lowpass_thresh, baseline,
                               expected):
    curves = [
        np.array([[-1, 0, 1], [1., 1., 1.]]),
        np.array([[-1, 0, 1, 2], [2., 3., 4., 5.]])
    ]
    metric = metrics_online.IqrAcrossRuns(
        lowpass_thresh=lowpass_thresh, eval_points=timepoints,
        baseline=baseline)
    result = metric(curves)
    np.testing.assert_allclose(result, expected)

  @parameterized.parameters(
      ([0, 1], None, None, [1.75, 0.]),
      ([0, 1], None, 0.5, [3.5, 0.]),
  )
  def testCorrectMadAcrossRuns(self, timepoints, lowpass_thresh, baseline,
                               expected):
    curves = [
        np.array([[-1, 0, 1, 2], [1., 1., 1., 1.]]),
        np.array([[-1, 0, 1, 2], [2., 3., 4., 5.]]),
        np.array([[-1, 0, 1, 2], [3., 4.5, 1., 23.]]),
        np.array([[-1, 0, 1, 2], [0., -1., 1, 0.]]),
    ]
    metric = metrics_online.MadAcrossRuns(
        lowpass_thresh=lowpass_thresh, eval_points=timepoints,
        baseline=baseline)
    result = metric(curves)
    np.testing.assert_allclose(result, expected)

  # NOTE(review): parameter order here is (timepoints, window_size, ...)
  # while the metric constructor takes (window_size, eval_points, ...);
  # the call below passes them in constructor order.
  @parameterized.parameters(
      ([9], 3, None, [[0.], [0.], [0.35355339059]]),
      ([9], 3, 0.5, [[0.], [0.], [0.7071067812]]),
      (None, None, None, [[0.], [0.], [0.28867513411]]),
      (None, None, 2, [[0.], [0.], [0.1443375671]]),
      (None, None, 'curve_range', [[np.nan], [0.], [-1.9245008946666669]]),
  )
  def testCorrectStddevWithinRuns(self, timepoints, window_size, baseline,
                                  expected):
    curves = [
        np.array([[5, 7, 9], [1, 1, 1]]),
        np.array([[5, 7, 9, 11], [2, 3, 4, 5]]),
        np.array([[5, 7, 9, 10], [5, 4, 2, 1]])
    ]
    metric = metrics_online.StddevWithinRuns(window_size, timepoints, baseline)
    result = metric(curves)
    self.assertEqual(metric.name, 'StddevWithinRuns')
    np.testing.assert_allclose(result, expected)

  @parameterized.parameters(
      ([9], 3, None, [[0.], [0.], [0.25]]),
      ([9], 3, 0.5, [[0.], [0.], [.5]]),
      (None, None, None, [[0.], [0.], [0.25]]),
      (None, None, 2, [[0.], [0.], [0.125]]),
  )
  def testCorrectIqrWithinRuns(self, timepoints, window_size, baseline,
                               expected):
    curves = [
        np.array([[5, 7, 9], [1, 1, 1]]),
        np.array([[5, 7, 9, 11], [2, 3, 4, 5]]),
        np.array([[5, 7, 9, 10], [5, 4, 2, 1]]),
    ]
    metric = metrics_online.IqrWithinRuns(window_size, timepoints, baseline)
    result = metric(curves)
    self.assertEqual(metric.name, 'IqrWithinRuns')
    np.testing.assert_allclose(result, expected)

  @parameterized.parameters(([9], 3, None, [[0.], [0.], [0.25]]),
                            ([9], 3, 0.5, [[0.], [0.], [.5]]),
                            (None, None, None, [[0.], [0.], [0.]]),
                            (None, None, 2, [[0.], [0.], [0.]]))
  def testCorrectMadWithinRuns(self, timepoints, window_size, baseline,
                               expected):
    curves = [
        np.array([[5, 7, 9], [1, 1, 1]]),
        np.array([[5, 7, 9, 11], [2, 3, 4, 5]]),
        np.array([[5, 7, 9, 10], [5, 4, 2, 1]]),
    ]
    metric = metrics_online.MadWithinRuns(window_size, timepoints, baseline)
    result = metric(curves)
    self.assertEqual(metric.name, 'MadWithinRuns')
    np.testing.assert_allclose(result, expected)

  # Checks that energy above a frequency threshold decreases as the
  # threshold rises past the two component sine frequencies.
  def testHighFreqEnergyWithinRuns(self):
    t = np.arange(0, 50, 0.1)
    sine1 = np.sin(t - np.pi / 4)  # frequency 1/(2*pi) = 0.159
    sine2 = 2 * np.sin(2 * t)  # frequency 2/(2*pi) = 0.318
    curves = [np.array([t, sine1 + sine2])]
    thresh_0 = metrics_online.HighFreqEnergyWithinRuns(thresh=0)(curves)[0]
    thresh_158 = metrics_online.HighFreqEnergyWithinRuns(
        thresh=0.158)(curves)[0]
    thresh_16 = metrics_online.HighFreqEnergyWithinRuns(thresh=0.16)(curves)[0]
    thresh_20 = metrics_online.HighFreqEnergyWithinRuns(thresh=0.2)(curves)[0]
    thresh_32 = metrics_online.HighFreqEnergyWithinRuns(thresh=0.32)(curves)[0]
    self.assertGreater(thresh_0, thresh_158)
    self.assertGreater(thresh_158, thresh_20)
    self.assertGreater(thresh_20, thresh_32)
    self.assertEqual(thresh_0, 1)
    self.assertTrue(np.allclose(thresh_158, 1, rtol=8e-3))
    self.assertFalse(np.allclose(thresh_16, 1, rtol=8e-3))

  @parameterized.parameters(
      ([np.array([range(4), [1, 2, 3, 4]])], False, None, [0]),
      ([np.array([range(4), [1, 2, 3, 4]])], True, 0.5, [0]),
      ([
          np.array([range(5), [-1, -2, -3, -4, -5]]),
          np.array([range(4), [1, 3, 2, 1.2]])
      ], False, None, [4, 1.8]),
      ([np.array([range(5), [5, 5, 3, 6, 5]])], False, None, [2]),
      ([np.array([range(6), [100, 150, 90, 120, 80, 200]])], False, None,
       [70]),
      ([np.array([range(6), [100, 150, 90, 120, 80, 200]])
       ], True, 10, [0.61764706]))
  def testCorrectMaxDrawdown(self, curves, mean_normalize, baseline, expected):
    metric = metrics_online.MaxDrawdown(baseline, mean_normalize)
    result = metric(curves)
    np.testing.assert_allclose(result, expected)

  @parameterized.parameters(
      (metrics_online.LowerCVaROnRaw, 0.51, None, [1.5, -0.2]),
      (metrics_online.LowerCVaROnRaw, 0.49, 0.1, [1.5 / 0.1, -0.2 / 0.1]),
      (metrics_online.LowerCVaROnRaw, 0.49, 'curve_range',
       [1.5 / 2.85, -0.2 / 0.595]),
      (metrics_online.LowerCVaROnDiffs, 0.51, None, [0.8333333, -0.4]),
      (metrics_online.LowerCVaROnDiffs, 0.49, 1, [0.5, -0.6]),
      (metrics_online.LowerCVaROnDiffs, 0.51, 2, [0.8333333 / 2, -0.4 / 2]),
      (metrics_online.LowerCVaROnDiffs, 0.49, 2, [0.5 / 2, -0.6 / 2]),
      (metrics_online.LowerCVaROnDrawdown, 0.51, None, [0, 0]),
      (metrics_online.LowerCVaROnDrawdown, 0.49, 2, [0, 0]),
      (metrics_online.UpperCVaROnDrawdown, 0.25, None, [0, 0.8]),
      (metrics_online.UpperCVaROnDrawdown, 0.49, 2, [0, 0.5 / 2]),
      (metrics_online.LowerCVaROnAcross, 0.51, None, [0.1, -0.5]),
      (metrics_online.LowerCVaROnAcross, 0.49, 2, [0.05, -0.25]),
  )
  def testCorrectCVaR(self, cvar_fn, alpha, baseline, expected):
    curves = [
        np.array([[1, 2, 4, 5], [1, 2, 3, 4]]),
        np.array([range(4), [0.3, 0.1, -0.5, 1]])
    ]
    metric = cvar_fn(alpha, baseline)
    result = metric(curves)
    np.testing.assert_allclose(result, expected)

  @parameterized.parameters(
      ([np.array([[101, 201, 301, 401], [1, 2, 3, 4]])], [101, 401
                                                         ], 1, None, [[1, 4]]),
      ([
          np.array([[1, 1001, 2001, 3001, 4001], [-1, -2, -3, -4, -5]]),
          np.array([[1, 1001, 2001, 3001], [1, 3, 2, 1.2]])
      ], [1001, 2001], 1001, None, [[-1.5, -2.5], [2, 2.5]]),
      ([np.array([range(3), [3, 6, 9]]),
        np.array([range(2), [7.5, 7.7]])], None, None, None, [[4.5], [7.6]]),
      ([np.array([range(3), [3, 6, 9]]),
        np.array([range(2), [7.5, 7.7]])], None, None, 0.8,
       [[4.5 / 0.8], [7.6 / 0.8]]),
      ([np.array([range(2), [7.5, 7.7]])], None, None, (0.8, 10.3),
       [[0.71578947368]]),
      ([np.array([range(3), [7.5, 7.7, 1.0]])], None, None, None, [[7.5]]),
  )
  def testCorrectMedianPerf(self, curves, timepoints, window_size, baseline,
                            expected):
    metric = metrics_online.MedianPerfDuringTraining(window_size, timepoints,
                                                     baseline)
    result = metric(curves)
    np.testing.assert_allclose(result, expected)

  # Every registered online metric must declare bigger_is_better; the
  # explicit else-raise ensures new metrics get classified here.
  def testMetricProperties_BiggerIsBetter(self):
    for metric in metrics_online.all_online_metrics():
      if metric.__name__ in [
          'LowerCVaROnAcross', 'LowerCVaROnDiffs', 'UpperCVaROnAcross',
          'UpperCVaROnDiffs', 'LowerCVaROnRaw', 'UpperCVaROnRaw',
          'MedianPerfDuringTraining'
      ]:
        self.assertTrue(metric.bigger_is_better)
      elif metric.__name__ in [
          'IqrAcrossRuns', 'MadAcrossRuns', 'StddevAcrossRuns',
          'IqrWithinRuns', 'MadWithinRuns', 'StddevWithinRuns',
          'HighFreqEnergyWithinRuns', 'MaxDrawdown', 'LowerCVaROnDrawdown',
          'UpperCVaROnDrawdown'
      ]:
        self.assertFalse(metric.bigger_is_better)
      else:
        raise ValueError('Metric %s not accounted for.' % metric.__name__)

  # Every registered online metric must declare its result_dimensions
  # string (A=across, T=tasks, R=runs, P=eval points).
  def testMetricProperties_ResultDimensions(self):
    for metric in metrics_online.all_online_metrics():
      if metric.__name__ in [
          'IqrAcrossRuns', 'MadAcrossRuns', 'StddevAcrossRuns',
          'LowerCVaROnAcross', 'UpperCVaROnAcross'
      ]:
        self.assertEqual(metric.result_dimensions, 'ATP')
      elif metric.__name__ in [
          'IqrWithinRuns', 'MadWithinRuns', 'StddevWithinRuns',
          'MedianPerfDuringTraining'
      ]:
        self.assertEqual(metric.result_dimensions, 'ATRP')
      elif metric.__name__ in [
          'LowerCVaROnDiffs', 'LowerCVaROnDrawdown', 'LowerCVaROnRaw',
          'UpperCVaROnDiffs', 'UpperCVaROnDrawdown', 'UpperCVaROnRaw',
          'MaxDrawdown', 'HighFreqEnergyWithinRuns'
      ]:
        self.assertEqual(metric.result_dimensions, 'ATR')
      else:
        raise ValueError('Metric %s not accounted for.' % metric.__name__)

  # Spot-check that the name->class registry maps a few known metrics.
  def testRegistry(self):
    registry = metrics_online.REGISTRY
    self.assertEqual(registry['MedianPerfDuringTraining'],
                     metrics_online.MedianPerfDuringTraining)
    self.assertEqual(registry['IqrWithinRuns'], metrics_online.IqrWithinRuns)
    self.assertEqual(registry['LowerCVaROnDiffs'],
                     metrics_online.LowerCVaROnDiffs)


if __name__ == '__main__':
  unittest.main()
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.views import generic

from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.utils import memoized
from horizon import workflows

from openstack_dashboard import api
from openstack_dashboard.api import keystone
from openstack_dashboard import policy
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas

from openstack_dashboard.dashboards.identity.projects \
    import tables as project_tables
from openstack_dashboard.dashboards.identity.projects \
    import workflows as project_workflows
from openstack_dashboard.dashboards.project.overview \
    import views as project_views

# Project attributes that are copied into the update workflow's initial data.
PROJECT_INFO_FIELDS = ("domain_id",
                       "domain_name",
                       "name",
                       "description",
                       "enabled")

INDEX_URL = "horizon:identity:projects:index"


class TenantContextMixin(object):
    """Mixin resolving the project (tenant) referenced by the URL kwargs."""

    @memoized.memoized_method
    def get_object(self):
        """Return the project for ``tenant_id``, redirecting to the index
        view with an error message if it cannot be retrieved.
        """
        tenant_id = self.kwargs['tenant_id']
        try:
            return api.keystone.tenant_get(self.request, tenant_id,
                                           admin=True)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve project information.'),
                              redirect=reverse(INDEX_URL))

    def get_context_data(self, **kwargs):
        context = super(TenantContextMixin, self).get_context_data(**kwargs)
        context['tenant'] = self.get_object()
        return context


class IndexView(tables.DataTableView):
    """Paginated listing of the projects visible to the current user."""
    table_class = project_tables.TenantsTable
    template_name = 'identity/projects/index.html'
    page_title = _("Projects")

    def has_more_data(self, table):
        return self._more

    def get_data(self):
        """Return one page of projects.

        Tries the admin-level ``identity:list_projects`` policy first, then
        falls back to the user-scoped ``identity:list_user_projects`` policy,
        and shows an informational message when neither check passes.
        """
        # NOTE: leftover debug ``print`` statements (marker dump) were
        # removed here; they produced noise on stdout and were not valid
        # Python 3 syntax.
        tenants = []
        marker = self.request.GET.get(
            project_tables.TenantsTable._meta.pagination_param, None)
        domain_context = self.request.session.get('domain_context', None)
        self._more = False
        if policy.check((("identity", "identity:list_projects"),),
                        self.request):
            try:
                tenants, self._more = api.keystone.tenant_list(
                    self.request,
                    domain=domain_context,
                    paginate=True,
                    marker=marker)
            except Exception:
                exceptions.handle(self.request,
                                  _("Unable to retrieve project list."))
        elif policy.check((("identity", "identity:list_user_projects"),),
                          self.request):
            try:
                tenants, self._more = api.keystone.tenant_list(
                    self.request,
                    user=self.request.user.id,
                    paginate=True,
                    marker=marker,
                    admin=False)
            except Exception:
                exceptions.handle(self.request,
                                  _("Unable to retrieve project "
                                    "information."))
        else:
            msg = \
                _("Insufficient privilege level to view project information.")
            messages.info(self.request, msg)
        return tenants


class ProjectUsageView(usage.UsageView):
    """Usage overview (and CSV export) for a single project."""
    table_class = usage.ProjectUsageTable
    usage_class = usage.ProjectUsage
    template_name = 'identity/projects/usage.html'
    csv_response_class = project_views.ProjectUsageCsvRenderer
    csv_template_name = 'project/overview/usage.csv'
    page_title = _("Project Usage")

    def get_data(self):
        super(ProjectUsageView, self).get_data()
        return self.usage.get_instances()


class CreateProjectView(workflows.WorkflowView):
    """Workflow view for creating a new project."""
    workflow_class = project_workflows.CreateProject

    def get_initial(self):
        """Seed the workflow with the default domain and default quotas."""
        initial = super(CreateProjectView, self).get_initial()

        # Set the domain of the project.
        domain = api.keystone.get_default_domain(self.request)
        initial["domain_id"] = domain.id
        initial["domain_name"] = domain.name

        # Get initial quota defaults.
        try:
            quota_defaults = quotas.get_default_quota_data(self.request)

            try:
                if api.base.is_service_enabled(self.request, 'network') and \
                        api.neutron.is_quotas_extension_supported(
                            self.request):
                    # TODO(jpichon): There is no API to access the Neutron
                    # default quotas (LP#1204956). For now, use the values
                    # from the current project.
                    project_id = self.request.user.project_id
                    quota_defaults += api.neutron.tenant_quota_get(
                        self.request,
                        tenant_id=project_id)
            except Exception:
                # Neutron quota failure is non-fatal; surface it on the
                # quotas step and continue with the Nova/Cinder defaults.
                error_msg = _('Unable to retrieve default Neutron quota '
                              'values.')
                self.add_error_to_step(error_msg, 'create_quotas')

            for field in quotas.QUOTA_FIELDS:
                initial[field] = quota_defaults.get(field).limit

        except Exception:
            error_msg = _('Unable to retrieve default quota values.')
            self.add_error_to_step(error_msg, 'create_quotas')
        return initial


class UpdateProjectView(workflows.WorkflowView):
    """Workflow view for editing an existing project."""
    workflow_class = project_workflows.UpdateProject

    def get_initial(self):
        """Seed the workflow with the project's info, domain and quotas."""
        initial = super(UpdateProjectView, self).get_initial()

        project_id = self.kwargs['tenant_id']
        initial['project_id'] = project_id

        try:
            # Get initial project info.
            project_info = api.keystone.tenant_get(self.request, project_id,
                                                   admin=True)
            for field in PROJECT_INFO_FIELDS:
                initial[field] = getattr(project_info, field, None)

            # Retrieve the name of the domain the project belongs to
            # (domains only exist in keystone v3 and later).
            if keystone.VERSIONS.active >= 3:
                try:
                    domain = api.keystone.domain_get(self.request,
                                                     initial["domain_id"])
                    initial["domain_name"] = domain.name
                except Exception:
                    exceptions.handle(
                        self.request,
                        _('Unable to retrieve project domain.'),
                        redirect=reverse(INDEX_URL))

            # Get initial project quota.
            quota_data = quotas.get_tenant_quota_data(self.request,
                                                      tenant_id=project_id)
            if api.base.is_service_enabled(self.request, 'network') and \
                    api.neutron.is_quotas_extension_supported(self.request):
                quota_data += api.neutron.tenant_quota_get(
                    self.request, tenant_id=project_id)
            for field in quotas.QUOTA_FIELDS:
                initial[field] = quota_data.get(field).limit
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve project details.'),
                              redirect=reverse(INDEX_URL))
        return initial


class DetailProjectView(generic.TemplateView):
    """Read-only detail page for a single project."""
    template_name = 'identity/projects/detail.html'

    def get_context_data(self, **kwargs):
        context = super(DetailProjectView, self).get_context_data(**kwargs)
        project = self.get_data()
        table = project_tables.TenantsTable(self.request)
        context["project"] = project
        context["page_title"] = _("Project Details: %s") % project.name
        context["url"] = reverse(INDEX_URL)
        context["actions"] = table.render_row_actions(project)
        return context

    @memoized.memoized_method
    def get_data(self):
        """Fetch the project, redirecting to the index view on failure."""
        try:
            project_id = self.kwargs['project_id']
            project = api.keystone.tenant_get(self.request, project_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve project details.'),
                              redirect=reverse(INDEX_URL))
        return project
##########################################################################
#
#  Copyright (c) 2012, John Haddon. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#      * Redistributions of source code must retain the above
#        copyright notice, this list of conditions and the following
#        disclaimer.
#
#      * Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided with
#        the distribution.
#
#      * Neither the name of John Haddon nor the names of
#        any other contributors to this software may be used to endorse or
#        promote products derived from this software without specific prior
#        written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################

import sys
import unittest
import inspect
import types

import IECore

import Gaffer

## A useful base class for creating test cases for nodes.
# NOTE(review): this module is Python-2 only as written — it relies on
# sys.exc_clear() and basestring, both of which were removed in Python 3.
class TestCase( unittest.TestCase ) :

	def tearDown( self ) :

		# Clear any previous exceptions, as they can be holding
		# references to resources we would like to die. This is
		# important for both the UI tests where we wish to check
		# that all widgets have been destroyed, and also for the
		# shutdown tests that are run when the test application
		# exits.

		if "_ExpectedFailure" in str( sys.exc_info()[0] ) :
			# the expected failure exception in the unittest module
			# unhelpfully also hangs on to exceptions, so we remove
			# that before calling exc_clear().
			sys.exc_info()[1].exc_info = ( None, None, None )

		# Python 2 only — drops the interpreter's record of the last exception.
		sys.exc_clear()

	## Attempts to ensure that the hashes for a node
	# are reasonable by jiggling around input values
	# and checking that the hash changes when it should.
	def assertHashesValid( self, node, inputsToIgnore=[], outputsToIgnore=[] ) :

		# find all input ValuePlugs
		inputPlugs = []
		def __walkInputs( parent ) :
			# Recurse into compound plugs; collect leaf ValuePlugs
			# that the caller hasn't asked us to skip.
			for child in parent.children() :
				if isinstance( child, Gaffer.CompoundPlug ) :
					__walkInputs( child )
				elif isinstance( child, Gaffer.ValuePlug ) :
					if child not in inputsToIgnore :
						inputPlugs.append( child )
		__walkInputs( node )

		self.failUnless( len( inputPlugs ) > 0 )

		numTests = 0
		for inputPlug in inputPlugs :
			for outputPlug in node.affects( inputPlug ) :

				if outputPlug in outputsToIgnore :
					continue

				# Record the hash before perturbing the input, so we can
				# assert it changes afterwards.
				hash = outputPlug.hash()

				value = inputPlug.getValue()
				# Choose a perturbation appropriate to the value's type.
				if isinstance( value, float ) :
					increment = 0.1
				elif isinstance( value, int ) :
					increment = 1
				elif isinstance( value, basestring ) :
					increment = "a"
				else :
					# don't know how to deal with this
					# value type.
					continue

				inputPlug.setValue( value + increment )
				if inputPlug.getValue() == value :
					# The increment was clamped away (e.g. by a plug's max
					# value) — try the other direction, and skip the plug
					# entirely if it still won't budge.
					inputPlug.setValue( value - increment )
					if inputPlug.getValue() == value :
						continue

				self.assertNotEqual( outputPlug.hash(), hash, outputPlug.fullName() + " hash not affected by " + inputPlug.fullName() )

				numTests += 1

		# Make sure we actually exercised at least one input/output pair.
		self.failUnless( numTests > 0 )

	## Asserts that every RunTimeTyped class in `module` registers a type
	# name of the form "ModuleName::ClassName".
	def assertTypeNamesArePrefixed( self, module, namesToIgnore = () ) :

		for name in dir( module ) :

			cls = getattr( module, name )
			if not inspect.isclass( cls ) :
				continue

			if issubclass( cls, IECore.RunTimeTyped ) :
				if cls.staticTypeName() in namesToIgnore :
					continue
				self.assertEqual( cls.staticTypeName(), module.__name__ + "::" + cls.__name__ )

	## Asserts that default-constructed GraphComponents in `module` are
	# named after their class. Classes that can't be default-constructed
	# are skipped (best effort).
	def assertDefaultNamesAreCorrect( self, module ) :

		for name in dir( module ) :

			cls = getattr( module, name )
			if not inspect.isclass( cls ) or not issubclass( cls, Gaffer.GraphComponent ) :
				continue

			try :
				instance = cls()
			except :
				continue

			self.assertEqual( instance.getName(), cls.staticTypeName().rpartition( ":" )[2] )

	## Asserts that every constructible Node in `module` carries a
	# "description" metadata entry, and likewise every non-terminal,
	# non-private plug. Terminal plug types (and any in
	# `additionalTerminalPlugTypes`) are not recursed into.
	def assertNodesAreDocumented( self, module, additionalTerminalPlugTypes = () ) :

		terminalPlugTypes = (
			Gaffer.ArrayPlug,
			Gaffer.V2fPlug, Gaffer.V3fPlug,
			Gaffer.V2iPlug, Gaffer.V3iPlug,
			Gaffer.Color3fPlug, Gaffer.Color4fPlug,
			Gaffer.SplineffPlug, Gaffer.SplinefColor3fPlug,
			Gaffer.Box2iPlug, Gaffer.Box3iPlug,
			Gaffer.Box2fPlug, Gaffer.Box3fPlug,
			Gaffer.TransformPlug, Gaffer.Transform2DPlug,
			Gaffer.CompoundDataPlug.MemberPlug,
			additionalTerminalPlugTypes
		)

		undocumentedNodes = []
		undocumentedPlugs = []
		for name in dir( module ) :

			cls = getattr( module, name )
			if not inspect.isclass( cls ) or not issubclass( cls, Gaffer.Node ) :
				continue

			try :
				node = cls()
			except :
				continue

			description = Gaffer.Metadata.nodeValue( node, "description", inherit = False )
			if (not description) or description.isspace() :
				undocumentedNodes.append( node.getName() )

			def checkPlugs( graphComponent ) :

				# Plugs whose names start with "__" are considered private
				# and exempt from documentation requirements.
				if isinstance( graphComponent, Gaffer.Plug ) and not graphComponent.getName().startswith( "__" ) :
					description = Gaffer.Metadata.plugValue( graphComponent, "description" )
					if (not description) or description.isspace() :
						undocumentedPlugs.append( graphComponent.fullName() )

				if not isinstance( graphComponent, terminalPlugTypes ) :
					for plug in graphComponent.children( Gaffer.Plug ) :
						checkPlugs( plug )

			checkPlugs( node )

		# Comparing against [] (rather than asserting emptiness) gives a
		# failure message that lists the offenders.
		self.assertEqual( undocumentedNodes, [] )
		self.assertEqual( undocumentedPlugs, [] )

	## We don't serialise plug values when they're at their default, so
	# newly constructed nodes must have all their plugs be at the default value.
	def assertNodesConstructWithDefaultValues( self, module ) :

		for name in dir( module ) :

			cls = getattr( module, name )
			if not inspect.isclass( cls ) or not issubclass( cls, Gaffer.Node ) :
				continue

			try :
				node = cls()
			except :
				continue

			for plug in node.children( Gaffer.Plug ) :

				if plug.direction() == plug.Direction.In and isinstance( plug, Gaffer.ValuePlug ) :
					self.assertTrue( plug.isSetToDefault(), plug.fullName() + " not at default value following construction" )
from google.appengine.ext import ndb

from controllers.api.api_district_controller import ApiDistrictListController, ApiDistrictEventsController, ApiDistrictRankingsController
from controllers.api.api_event_controller import ApiEventController, ApiEventTeamsController, \
                                                 ApiEventMatchesController, ApiEventStatsController, \
                                                 ApiEventRankingsController, ApiEventAwardsController, ApiEventListController, ApiEventDistrictPointsController
from controllers.api.api_match_controller import ApiMatchController
from controllers.api.api_team_controller import ApiTeamController, ApiTeamEventsController, ApiTeamEventAwardsController, \
                                                ApiTeamEventMatchesController, ApiTeamMediaController, ApiTeamYearsParticipatedController, \
                                                ApiTeamListController, ApiTeamHistoryEventsController, ApiTeamHistoryAwardsController

from models.event import Event
from models.event_team import EventTeam
from models.team import Team


class CacheClearer(object):
    """Maps changed model instances to the API cache entries they invalidate.

    Every method returns a list of (cache_key, controller) pairs; the
    `affected_refs` dicts carry the keys/years extracted from the changed
    models.
    """

    @classmethod
    def get_award_cache_keys_and_controllers(cls, affected_refs):
        """Cache entries referencing a changed award."""
        event_keys = affected_refs['event']
        team_keys = affected_refs['team_list']
        years = affected_refs['year']

        return cls._get_event_awards_cache_keys_and_controllers(event_keys) + \
            cls._get_team_event_awards_cache_keys_and_controllers(team_keys, event_keys)

    @classmethod
    def get_event_cache_keys_and_controllers(cls, affected_refs):
        """Cache entries referencing a changed event."""
        event_keys = affected_refs['key']
        years = affected_refs['year']
        event_district_abbrevs = affected_refs['event_district_abbrev']

        # Kick off the EventTeam query asynchronously; the result is only
        # needed below to derive the set of affected team keys.
        eventteam_keys_future = EventTeam.query(
            EventTeam.event.IN(list(event_keys))).fetch_async(None, keys_only=True)

        team_keys = set()
        for eventteam_key in eventteam_keys_future.get_result():
            # EventTeam key names look like "<event>_<team>".
            team_key_name = eventteam_key.id().split('_')[1]
            team_keys.add(ndb.Key(Team, team_key_name))

        return (cls._get_events_cache_keys_and_controllers(event_keys) +
                cls._get_event_district_points_cache_keys_and_controllers(event_keys) +
                cls._get_eventlist_cache_keys_and_controllers(years) +
                cls._get_team_events_cache_keys_and_controllers(team_keys, years) +
                cls._get_districtlist_cache_keys_and_controllers(years) +
                cls._get_district_events_cache_keys_and_controllers(event_district_abbrevs, years) +
                cls._get_district_rankings_cache_keys_and_controllers(event_district_abbrevs, years))

    @classmethod
    def get_eventteam_cache_keys_and_controllers(cls, affected_refs):
        """Cache entries referencing a changed event/team association."""
        event_keys = affected_refs['event']
        team_keys = affected_refs['team']
        years = affected_refs['year']

        return (cls._get_eventteams_cache_keys_and_controllers(event_keys) +
                cls._get_team_events_cache_keys_and_controllers(team_keys, years) +
                cls._get_team_years_participated_cache_keys_and_controllers(team_keys))

    @classmethod
    def get_match_cache_keys_and_controllers(cls, affected_refs):
        """Cache entries referencing a changed match."""
        match_keys = affected_refs['key']
        event_keys = affected_refs['event']
        team_keys = affected_refs['team_keys']
        years = affected_refs['year']

        return (cls._get_match_cache_keys_and_controllers(match_keys) +
                cls._get_matches_cache_keys_and_controllers(event_keys) +
                cls._get_team_event_matches_cache_keys_and_controllers(team_keys, event_keys))

    @classmethod
    def get_media_cache_keys_and_controllers(cls, affected_refs):
        """Cache entries referencing a changed media object."""
        reference_keys = affected_refs['references']
        years = affected_refs['year']

        return cls._get_media_cache_keys_and_controllers(reference_keys, years)

    @classmethod
    def get_team_cache_keys_and_controllers(cls, affected_refs):
        """Cache entries referencing a changed team."""
        team_keys = affected_refs['key']

        eventteam_keys_future = EventTeam.query(
            EventTeam.team.IN(list(team_keys))).fetch_async(None, keys_only=True)

        event_keys = set()
        for eventteam_key in eventteam_keys_future.get_result():
            # EventTeam key names look like "<event>_<team>".
            event_key_name = eventteam_key.id().split('_')[0]
            event_keys.add(ndb.Key(Event, event_key_name))

        return (cls._get_teams_cache_keys_and_controllers(team_keys) +
                cls._get_eventteams_cache_keys_and_controllers(event_keys) +
                cls._get_teamlist_cache_keys_and_controllers(team_keys))

    @classmethod
    def _get_districtlist_cache_keys_and_controllers(cls, years):
        """One district-list entry per (truthy) year."""
        return [(ApiDistrictListController.get_cache_key_from_format(year), ApiDistrictListController)
                for year in years if year]

    @classmethod
    def _get_district_events_cache_keys_and_controllers(cls, district_shorts, years):
        """One district-events entry per (district, year) pair."""
        return [(ApiDistrictEventsController.get_cache_key_from_format(short, year), ApiDistrictEventsController)
                for short in district_shorts if short
                for year in years if year]

    @classmethod
    def _get_district_rankings_cache_keys_and_controllers(cls, district_shorts, years):
        """One district-rankings entry per (district, year) pair."""
        return [(ApiDistrictRankingsController.get_cache_key_from_format(short, year), ApiDistrictRankingsController)
                for short in district_shorts if short
                for year in years if year]

    @classmethod
    def _get_event_awards_cache_keys_and_controllers(cls, event_keys):
        """One event-awards entry per event."""
        return [(ApiEventAwardsController.get_cache_key_from_format(key.id()), ApiEventAwardsController)
                for key in event_keys if key]

    @classmethod
    def _get_event_district_points_cache_keys_and_controllers(cls, event_keys):
        """One district-points entry per event."""
        return [(ApiEventDistrictPointsController.get_cache_key_from_format(key.id()), ApiEventDistrictPointsController)
                for key in event_keys if key]

    @classmethod
    def _get_events_cache_keys_and_controllers(cls, event_keys):
        """Event detail, stats, and rankings entries for each event."""
        return [(controller.get_cache_key_from_format(key.id()), controller)
                for key in event_keys if key
                for controller in (ApiEventController,
                                   ApiEventStatsController,
                                   ApiEventRankingsController)]

    @classmethod
    def _get_eventlist_cache_keys_and_controllers(cls, years):
        """One event-list entry per year."""
        return [(ApiEventListController.get_cache_key_from_format(year), ApiEventListController)
                for year in years if year]

    @classmethod
    def _get_eventteams_cache_keys_and_controllers(cls, event_keys):
        """One event-teams entry per event."""
        return [(ApiEventTeamsController.get_cache_key_from_format(key.id()), ApiEventTeamsController)
                for key in event_keys if key]

    @classmethod
    def _get_match_cache_keys_and_controllers(cls, match_keys):
        """One match-detail entry per match."""
        return [(ApiMatchController.get_cache_key_from_format(key.id()), ApiMatchController)
                for key in match_keys if key]

    @classmethod
    def _get_matches_cache_keys_and_controllers(cls, event_keys):
        """One event-matches entry per event."""
        return [(ApiEventMatchesController.get_cache_key_from_format(key.id()), ApiEventMatchesController)
                for key in event_keys if key]

    @classmethod
    def _get_media_cache_keys_and_controllers(cls, team_keys, years):
        """One team-media entry per (team, year) pair."""
        return [(ApiTeamMediaController.get_cache_key_from_format(key.id(), year), ApiTeamMediaController)
                for key in team_keys if key
                for year in years if year]

    @classmethod
    def _get_teams_cache_keys_and_controllers(cls, team_keys):
        """One team-detail entry per team."""
        return [(ApiTeamController.get_cache_key_from_format(key.id()), ApiTeamController)
                for key in team_keys if key]

    @classmethod
    def _get_team_event_awards_cache_keys_and_controllers(cls, team_keys, event_keys):
        """Per (team, event): the team-event awards entry plus the team's
        history-awards entry."""
        return [pair
                for team_key in team_keys if team_key
                for event_key in event_keys if event_key
                for pair in (
                    (ApiTeamEventAwardsController.get_cache_key_from_format(team_key.id(), event_key.id()), ApiTeamEventAwardsController),
                    (ApiTeamHistoryAwardsController.get_cache_key_from_format(team_key.id()), ApiTeamHistoryAwardsController))]

    @classmethod
    def _get_team_event_matches_cache_keys_and_controllers(cls, team_keys, event_keys):
        """One team-event matches entry per (team, event) pair."""
        return [(ApiTeamEventMatchesController.get_cache_key_from_format(team_key.id(), event_key.id()), ApiTeamEventMatchesController)
                for team_key in team_keys if team_key
                for event_key in event_keys if event_key]

    @classmethod
    def _get_team_events_cache_keys_and_controllers(cls, team_keys, years):
        """Per (team, year): the team-events entry plus the team's
        history-events entry."""
        return [pair
                for team_key in team_keys if team_key
                for year in years if year
                for pair in (
                    (ApiTeamEventsController.get_cache_key_from_format(team_key.id(), year), ApiTeamEventsController),
                    (ApiTeamHistoryEventsController.get_cache_key_from_format(team_key.id()), ApiTeamHistoryEventsController))]

    @classmethod
    def _get_team_years_participated_cache_keys_and_controllers(cls, team_keys):
        """One years-participated entry per team."""
        return [(ApiTeamYearsParticipatedController.get_cache_key_from_format(key.id()), ApiTeamYearsParticipatedController)
                for key in team_keys if key]

    @classmethod
    def _get_teamlist_cache_keys_and_controllers(cls, team_keys):
        """One team-list page entry per team; team key names look like
        "frcXXXX", and the numeric part determines the list page."""
        return [(ApiTeamListController.get_cache_key_from_format(
                     int(key.id()[3:]) / ApiTeamListController.PAGE_SIZE),
                 ApiTeamListController)
                for key in team_keys if key]
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Presubmit script for Chromium JS resources.

See chrome/browser/PRESUBMIT.py
"""

import regex_check


class JSChecker(object):
  """Runs per-line regex checks and the Closure linter over changed JS files."""

  def __init__(self, input_api, output_api, file_filter=None):
    # input_api/output_api are the standard Chromium presubmit objects;
    # file_filter optionally narrows which affected files are checked.
    self.input_api = input_api
    self.output_api = output_api
    self.file_filter = file_filter

  def RegexCheck(self, line_number, line, regex, message):
    """Delegates to regex_check.RegexCheck using the presubmit's re module."""
    return regex_check.RegexCheck(
        self.input_api.re, line_number, line, regex, message)

  def ChromeSendCheck(self, i, line):
    """Checks for a particular misuse of 'chrome.send'."""
    return self.RegexCheck(i, line, r"chrome\.send\('[^']+'\s*(, \[\])\)",
        'Passing an empty array to chrome.send is unnecessary')

  def ConstCheck(self, i, line):
    """Check for use of the 'const' keyword."""
    if self.input_api.re.search(r'\*\s+@const', line):
      # Probably a JsDoc line
      return ''

    return self.RegexCheck(i, line, r'(?:^|\s|\()(const)\s',
        'Use /** @const */ var varName; instead of const varName;')

  def EndJsDocCommentCheck(self, i, line):
    """Flags JSDoc comments terminated with '**/' instead of '*/'."""
    msg = 'End JSDoc comments with */ instead of **/'
    def _check(regex):
      return self.RegexCheck(i, line, regex, msg)
    # Either a lone '**/' line, or an inline '/** @tag ... **/' comment.
    return _check(r'^\s*(\*\*/)\s*$') or _check(r'/\*\* @[a-zA-Z]+.* (\*\*/)')

  def ExtraDotInGenericCheck(self, i, line):
    """Flags Closure generics written with a dot (Object.<T>)."""
    return self.RegexCheck(i, line, r"((?:Array|Object|Promise)\.<)",
        "Don't use a dot after generics (Object.<T> should be Object<T>).")

  def GetElementByIdCheck(self, i, line):
    """Checks for use of 'document.getElementById' instead of '$'."""
    return self.RegexCheck(i, line, r"(document\.getElementById)\('",
        "Use $('id') or getSVGElement('id') from chrome://resources/js/util.js "
        "instead of document.getElementById('id')")

  def InheritDocCheck(self, i, line):
    """Checks for use of '@inheritDoc' instead of '@override'."""
    return self.RegexCheck(i, line, r"\* (@inheritDoc)",
        "@inheritDoc is deprecated, use @override instead")

  def PolymerLocalIdCheck(self, i, line):
    """Checks for use of element.$.localId."""
    return self.RegexCheck(i, line, r"(?<!this)(\.\$)[\[\.]",
        "Please only use this.$.localId, not element.$.localId")

  def WrapperTypeCheck(self, i, line):
    """Check for wrappers (new String()) instead of builtins (string)."""
    return self.RegexCheck(i, line,
        r"(?:/\*)?\*.*?@(?:param|return|type) ?" # /** @param/@return/@type
        r"{[^}]*\b(String|Boolean|Number)\b[^}]*}", # {(Boolean|Number|String)}
        "Don't use wrapper types (i.e. new String() or @type {String})")

  def VarNameCheck(self, i, line):
    """See the style guide. http://goo.gl/uKir6"""
    return self.RegexCheck(i, line,
        r"var (?!g_\w+)([a-z]*[_$][\w_$]*)(?<! \$)",
        "Please use var namesLikeThis <http://goo.gl/uKir6>")

  def _GetErrorHighlight(self, start, length):
    """Takes a start position and a length, and produces a row of '^'s to
       highlight the corresponding part of a string.
    """
    return start * ' ' + length * '^'

  def _MakeErrorOrWarning(self, error_text, filename):
    """Takes a few lines of text indicating a style violation and turns it into
       a PresubmitError (if |filename| is in a directory where we've already
       taken out all the style guide violations) or a PresubmitPromptWarning
       (if it's in a directory where we haven't done that yet).
    """
    # TODO(tbreisacher): Once we've cleaned up the style nits in all of
    # resources/ we can get rid of this function.
    path = self.input_api.os_path
    resources = path.join(self.input_api.PresubmitLocalPath(), 'resources')
    # Directories that have been fully cleaned up: violations there are
    # hard errors rather than prompt warnings.
    dirs = (
        path.join(resources, 'bookmark_manager'),
        path.join(resources, 'extensions'),
        path.join(resources, 'file_manager'),
        path.join(resources, 'help'),
        path.join(resources, 'history'),
        path.join(resources, 'net_export'),
        path.join(resources, 'net_internals'),
        path.join(resources, 'network_action_predictor'),
        path.join(resources, 'ntp4'),
        path.join(resources, 'options'),
        path.join(resources, 'password_manager_internals'),
        path.join(resources, 'print_preview'),
        path.join(resources, 'profiler'),
        path.join(resources, 'sync_promo'),
        path.join(resources, 'tracing'),
        path.join(resources, 'uber'),
    )
    if filename.startswith(dirs):
      return self.output_api.PresubmitError(error_text)
    else:
      return self.output_api.PresubmitPromptWarning(error_text)

  def ClosureLint(self, file_to_lint, source=None):
    """Lints |file_to_lint| and returns the errors."""

    import sys
    import warnings
    old_path = sys.path
    old_filters = warnings.filters

    try:
      # The closure linter and its gflags dependency live under
      # third_party/, so they are put on sys.path just long enough to
      # import them, then the path is restored.
      closure_linter_path = self.input_api.os_path.join(
          self.input_api.change.RepositoryRoot(),
          "third_party",
          "closure_linter")
      gflags_path = self.input_api.os_path.join(
          self.input_api.change.RepositoryRoot(),
          "third_party",
          "python_gflags")

      sys.path.insert(0, closure_linter_path)
      sys.path.insert(0, gflags_path)

      warnings.filterwarnings('ignore', category=DeprecationWarning)

      from closure_linter import errors, runner
      from closure_linter.common import errorhandler
      import gflags

    finally:
      sys.path = old_path
      warnings.filters = old_filters

    class ErrorHandlerImpl(errorhandler.ErrorHandler):
      """Filters out errors that don't apply to Chromium JavaScript code."""

      def __init__(self, re):
        self._errors = []
        self.re = re

      def HandleFile(self, filename, first_token):
        self._filename = filename

      def HandleError(self, error):
        if (self._valid(error)):
          error.filename = self._filename
          self._errors.append(error)

      def GetErrors(self):
        return self._errors

      def HasErrors(self):
        return bool(self._errors)

      def _valid(self, error):
        """Check whether an error is valid. Most errors are valid, with a few
           exceptions which are listed here.
        """

        is_grit_statement = bool(
            self.re.search("</?(include|if)", error.token.line))

        # Ignore missing spaces before "(" until Promise#catch issue is solved.
        # http://crbug.com/338301
        if (error.code == errors.MISSING_SPACE and error.token.string == '(' and
            'catch(' in error.token.line):
          return False

        # Ignore "}.bind(" errors. http://crbug.com/397697
        if (error.code == errors.MISSING_SEMICOLON_AFTER_FUNCTION and
            '}.bind(' in error.token.line):
          return False

        return not is_grit_statement and error.code not in [
            errors.COMMA_AT_END_OF_LITERAL,
            errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
            errors.LINE_TOO_LONG,
            errors.MISSING_JSDOC_TAG_THIS,
        ]

    # Keep this in sync with third_party/closure_compiler/closure_args.gypi
    gflags.FLAGS.custom_jsdoc_tags = (
        'abstract',
        'attribute',
        'default',
        'demo',
        'element',
        'group',
        'hero',
        'polymerBehavior',
        'status',
        'submodule',
    )

    error_handler = ErrorHandlerImpl(self.input_api.re)
    runner.Run(file_to_lint, error_handler, source=source)
    return error_handler.GetErrors()

  def RunChecks(self):
    """Check for violations of the Chromium JavaScript style guide. See
       http://chromium.org/developers/web-development-style-guide#TOC-JavaScript
    """
    results = []

    affected_files = self.input_api.change.AffectedFiles(
        file_filter=self.file_filter,
        include_deletes=False)
    affected_js_files = filter(lambda f: f.LocalPath().endswith('.js'),
                               affected_files)
    for f in affected_js_files:
      error_lines = []

      # Check for the following:
      # * document.getElementById()
      # * the 'const' keyword
      # * Passing an empty array to 'chrome.send()'
      for i, line in enumerate(f.NewContents(), start=1):
        # filter(None, ...) drops checks that returned no violation.
        error_lines += filter(None, [
            self.ChromeSendCheck(i, line),
            self.ConstCheck(i, line),
            self.GetElementByIdCheck(i, line),
            self.EndJsDocCommentCheck(i, line),
            self.ExtraDotInGenericCheck(i, line),
            self.InheritDocCheck(i, line),
            self.PolymerLocalIdCheck(i, line),
            self.WrapperTypeCheck(i, line),
            self.VarNameCheck(i, line),
        ])

      # Use closure linter to check for several different errors.
      lint_errors = self.ClosureLint(self.input_api.os_path.join(
          self.input_api.change.RepositoryRoot(), f.LocalPath()))

      for error in lint_errors:
        highlight = self._GetErrorHighlight(
            error.token.start_index, error.token.length)
        error_msg = '  line %d: E%04d: %s\n%s\n%s' % (
            error.token.line_number,
            error.code,
            error.message,
            error.token.line.rstrip(),
            highlight)
        error_lines.append(error_msg)

      if error_lines:
        error_lines = [
            'Found JavaScript style violations in %s:' %
            f.LocalPath()] + error_lines
        results.append(self._MakeErrorOrWarning(
            '\n'.join(error_lines), f.AbsoluteLocalPath()))

    if results:
      results.append(self.output_api.PresubmitNotifyResult(
          'See the JavaScript style guide at '
          'http://www.chromium.org/developers/web-development-style-guide'
          '#TOC-JavaScript'))

    return results
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ducktape.tests.test import Test from ducktape.mark.resource import cluster from ducktape.utils.util import wait_until from ducktape.mark import parametrize, matrix from ducktape.cluster.remoteaccount import RemoteCommandError from ducktape.errors import TimeoutError from kafkatest.services.zookeeper import ZookeeperService from kafkatest.services.kafka import KafkaService from kafkatest.services.connect import ConnectServiceBase, ConnectStandaloneService, ErrorTolerance from kafkatest.services.console_consumer import ConsoleConsumer from kafkatest.services.security.security_config import SecurityConfig import hashlib import json import os.path class ConnectStandaloneFileTest(Test): """ Simple test of Kafka Connect that produces data from a file in one standalone process and consumes it on another, validating the output is identical to the input. 
""" FILE_SOURCE_CONNECTOR = 'org.apache.kafka.connect.file.FileStreamSourceConnector' FILE_SINK_CONNECTOR = 'org.apache.kafka.connect.file.FileStreamSinkConnector' INPUT_FILE = "/mnt/connect.input" OUTPUT_FILE = "/mnt/connect.output" OFFSETS_FILE = "/mnt/connect.offsets" TOPIC = "${file:%s:topic.external}" % ConnectServiceBase.EXTERNAL_CONFIGS_FILE TOPIC_TEST = "test" FIRST_INPUT_LIST = ["foo", "bar", "baz"] FIRST_INPUT = "\n".join(FIRST_INPUT_LIST) + "\n" SECOND_INPUT_LIST = ["razz", "ma", "tazz"] SECOND_INPUT = "\n".join(SECOND_INPUT_LIST) + "\n" SCHEMA = { "type": "string", "optional": False } def __init__(self, test_context): super(ConnectStandaloneFileTest, self).__init__(test_context) self.num_zk = 1 self.num_brokers = 1 self.topics = { 'test' : { 'partitions': 1, 'replication-factor': 1 } } self.zk = ZookeeperService(test_context, self.num_zk) @cluster(num_nodes=5) @parametrize(converter="org.apache.kafka.connect.json.JsonConverter", schemas=True) @parametrize(converter="org.apache.kafka.connect.json.JsonConverter", schemas=False) @parametrize(converter="org.apache.kafka.connect.storage.StringConverter", schemas=None) @parametrize(security_protocol=SecurityConfig.PLAINTEXT) @cluster(num_nodes=6) @parametrize(security_protocol=SecurityConfig.SASL_SSL) def test_file_source_and_sink(self, converter="org.apache.kafka.connect.json.JsonConverter", schemas=True, security_protocol='PLAINTEXT'): """ Validates basic end-to-end functionality of Connect standalone using the file source and sink converters. Includes parameterizations to test different converters (which also test per-connector converter overrides), schema/schemaless modes, and security support. """ assert converter != None, "converter type must be set" # Template parameters. Note that we don't set key/value.converter. These default to JsonConverter and we validate # converter overrides via the connector configuration. 
if converter != "org.apache.kafka.connect.json.JsonConverter": self.override_key_converter = converter self.override_value_converter = converter self.schemas = schemas self.kafka = KafkaService(self.test_context, self.num_brokers, self.zk, security_protocol=security_protocol, interbroker_security_protocol=security_protocol, topics=self.topics) self.source = ConnectStandaloneService(self.test_context, self.kafka, [self.INPUT_FILE, self.OFFSETS_FILE]) self.sink = ConnectStandaloneService(self.test_context, self.kafka, [self.OUTPUT_FILE, self.OFFSETS_FILE]) self.consumer_validator = ConsoleConsumer(self.test_context, 1, self.kafka, self.TOPIC_TEST, consumer_timeout_ms=10000) self.zk.start() self.kafka.start() self.source.set_configs(lambda node: self.render("connect-standalone.properties", node=node), [self.render("connect-file-source.properties")]) self.sink.set_configs(lambda node: self.render("connect-standalone.properties", node=node), [self.render("connect-file-sink.properties")]) self.source.set_external_configs(lambda node: self.render("connect-file-external.properties", node=node)) self.sink.set_external_configs(lambda node: self.render("connect-file-external.properties", node=node)) self.source.start() self.sink.start() # Generating data on the source node should generate new records and create new output on the sink node self.source.node.account.ssh("echo -e -n " + repr(self.FIRST_INPUT) + " >> " + self.INPUT_FILE) wait_until(lambda: self.validate_output(self.FIRST_INPUT), timeout_sec=60, err_msg="Data added to input file was not seen in the output file in a reasonable amount of time.") # Restarting both should result in them picking up where they left off, # only processing new data. 
self.source.restart() self.sink.restart() self.source.node.account.ssh("echo -e -n " + repr(self.SECOND_INPUT) + " >> " + self.INPUT_FILE) wait_until(lambda: self.validate_output(self.FIRST_INPUT + self.SECOND_INPUT), timeout_sec=60, err_msg="Sink output file never converged to the same state as the input file") # Validate the format of the data in the Kafka topic self.consumer_validator.run() expected = json.dumps([line if not self.schemas else { "schema": self.SCHEMA, "payload": line } for line in self.FIRST_INPUT_LIST + self.SECOND_INPUT_LIST]) decoder = (json.loads if converter.endswith("JsonConverter") else str) actual = json.dumps([decoder(x) for x in self.consumer_validator.messages_consumed[1]]) assert expected == actual, "Expected %s but saw %s in Kafka" % (expected, actual) def validate_output(self, value): try: output_hash = list(self.sink.node.account.ssh_capture("md5sum " + self.OUTPUT_FILE))[0].strip().split()[0] return output_hash == hashlib.md5(value).hexdigest() except RemoteCommandError: return False @cluster(num_nodes=5) @parametrize(error_tolerance=ErrorTolerance.ALL) @parametrize(error_tolerance=ErrorTolerance.NONE) def test_skip_and_log_to_dlq(self, error_tolerance): self.kafka = KafkaService(self.test_context, self.num_brokers, self.zk, topics=self.topics) # set config props self.override_error_tolerance_props = error_tolerance self.enable_deadletterqueue = True successful_records = [] faulty_records = [] records = [] for i in range(0, 1000): if i % 2 == 0: records.append('{"some_key":' + str(i) + '}') successful_records.append('{some_key=' + str(i) + '}') else: # badly formatted json records (missing a quote after the key) records.append('{"some_key:' + str(i) + '}') faulty_records.append('{"some_key:' + str(i) + '}') records = "\n".join(records) + "\n" successful_records = "\n".join(successful_records) + "\n" if error_tolerance == ErrorTolerance.ALL: faulty_records = ",".join(faulty_records) else: faulty_records = faulty_records[0] 
self.source = ConnectStandaloneService(self.test_context, self.kafka, [self.INPUT_FILE, self.OFFSETS_FILE]) self.sink = ConnectStandaloneService(self.test_context, self.kafka, [self.OUTPUT_FILE, self.OFFSETS_FILE]) self.zk.start() self.kafka.start() self.override_key_converter = "org.apache.kafka.connect.storage.StringConverter" self.override_value_converter = "org.apache.kafka.connect.storage.StringConverter" self.source.set_configs(lambda node: self.render("connect-standalone.properties", node=node), [self.render("connect-file-source.properties")]) self.override_key_converter = "org.apache.kafka.connect.json.JsonConverter" self.override_value_converter = "org.apache.kafka.connect.json.JsonConverter" self.override_key_converter_schemas_enable = False self.override_value_converter_schemas_enable = False self.sink.set_configs(lambda node: self.render("connect-standalone.properties", node=node), [self.render("connect-file-sink.properties")]) self.source.set_external_configs(lambda node: self.render("connect-file-external.properties", node=node)) self.sink.set_external_configs(lambda node: self.render("connect-file-external.properties", node=node)) self.source.start() self.sink.start() # Generating data on the source node should generate new records and create new output on the sink node self.source.node.account.ssh("echo -e -n " + repr(records) + " >> " + self.INPUT_FILE) if error_tolerance == ErrorTolerance.NONE: try: wait_until(lambda: self.validate_output(successful_records), timeout_sec=15, err_msg="Clean records added to input file were not seen in the output file in a reasonable amount of time.") raise Exception("Expected to not find any results in this file.") except TimeoutError: self.logger.info("Caught expected exception") else: wait_until(lambda: self.validate_output(successful_records), timeout_sec=15, err_msg="Clean records added to input file were not seen in the output file in a reasonable amount of time.") if self.enable_deadletterqueue: 
self.logger.info("Reading records from deadletterqueue") consumer_validator = ConsoleConsumer(self.test_context, 1, self.kafka, "my-connector-errors", consumer_timeout_ms=10000) consumer_validator.run() actual = ",".join(consumer_validator.messages_consumed[1]) assert faulty_records == actual, "Expected %s but saw %s in dead letter queue" % (faulty_records, actual)
# -*- coding: utf-8 -*-

# TenderComplaintResourceTest


def create_tender_complaint(self):
    """Full claim lifecycle: create a claim, answer it as the tender owner
    (resolutionType required), resolve it as the complainant, then verify a
    resolved complaint cannot be cancelled and that no complaint can be added
    to an unsuccessful tender.
    """
    response = self.app.post_json('/tenders/{}/complaints'.format(
        self.tender_id), {
        'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.test_author,
                 'status': 'claim'}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    complaint = response.json['data']
    owner_token = response.json['access']['token']
    self.assertEqual(complaint['author']['name'], self.test_author['name'])
    self.assertIn('id', complaint)
    self.assertIn(complaint['id'], response.headers['Location'])

    # Answering without a resolutionType must be rejected.
    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], self.tender_token),
        {"data": {
            "status": "answered"
        }}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [
        {u'description': [u'This field is required.'], u'location': u'body', u'name': u'resolutionType'},
    ])

    # Tender owner answers the claim with a resolution.
    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], self.tender_token),
        {"data": {
            "status": "answered",
            "resolutionType": "invalid",
            "resolution": "spam 100% " * 3
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["status"], "answered")
    self.assertEqual(response.json['data']["resolutionType"], "invalid")
    self.assertEqual(response.json['data']["resolution"], "spam 100% " * 3)

    # Complainant accepts the answer and resolves the claim.
    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "satisfied": True,
            "status": "resolved"
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["status"], "resolved")

    # A resolved complaint is final: cancellation is forbidden.
    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {"status": "cancelled", "cancellationReason": "reason"}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update complaint in current (resolved) status")

    # No new complaints can be added once the tender is unsuccessful.
    self.set_status('unsuccessful')

    response = self.app.post_json('/tenders/{}/complaints'.format(
        self.tender_id), {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.test_author}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't add complaint in current (unsuccessful) tender status")


def patch_tender_complaint(self):
    """Walk a draft complaint through claim -> answered -> pending ->
    stopping, checking access control (only the right token may patch),
    required-field validation, 404s for unknown ids, and the freeze once the
    tender is complete.
    """
    response = self.app.post_json('/tenders/{}/complaints'.format(
        self.tender_id), {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.test_author}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    complaint = response.json['data']
    owner_token = response.json['access']['token']

    # The tender owner cannot cancel someone else's complaint.
    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], self.tender_token),
        {"data": {
            "status": "cancelled",
            "cancellationReason": "reason"
        }}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Forbidden")

    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "title": "claim title",
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']["title"], "claim title")

    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "status": "claim",
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']["status"], "claim")

    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], self.tender_token),
        {"data": {
            "resolution": "changing rules"
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["resolution"], "changing rules")

    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], self.tender_token),
        {"data": {
            "status": "answered",
            "resolutionType": "resolved",
            "resolution": "resolution text" * 2
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["status"], "answered")
    self.assertEqual(response.json['data']["resolutionType"], "resolved")
    self.assertEqual(response.json['data']["resolution"], "resolution text" * 2)

    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "satisfied": False
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["satisfied"], False)

    # The complainant cannot unilaterally mark the complaint resolved here.
    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "status": "resolved"
        }}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update complaint")

    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "status": "pending"
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["status"], "pending")

    # Stopping a complaint requires a cancellationReason.
    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "status": "stopping"
        }}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [
        {u'description': [u'This field is required.'], u'location': u'body', u'name': u'cancellationReason'},
    ])

    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "status": "stopping",
            "cancellationReason": "reason"
        }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["status"], "stopping")
    self.assertEqual(response.json['data']["cancellationReason"], "reason")

    # Unknown complaint / tender ids produce structured 404 errors.
    response = self.app.patch_json('/tenders/{}/complaints/some_id'.format(self.tender_id),
                                   {"data": {"status": "resolved", "resolution": "resolution text"}}, status=404)
    self.assertEqual(response.status, '404 Not Found')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Not Found', u'location': u'url', u'name': u'complaint_id'}
    ])

    response = self.app.patch_json('/tenders/some_id/complaints/some_id',
                                   {"data": {"status": "resolved", "resolution": "resolution text"}}, status=404)
    self.assertEqual(response.status, '404 Not Found')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Not Found', u'location': u'url', u'name': u'tender_id'}
    ])

    response = self.app.get('/tenders/{}/complaints/{}'.format(self.tender_id, complaint['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["status"], "stopping")
    self.assertEqual(response.json['data']["cancellationReason"], "reason")
    self.assertEqual(response.json['data']["resolutionType"], "resolved")
    self.assertEqual(response.json['data']["resolution"], "resolution text" * 2)

    response = self.app.post_json('/tenders/{}/complaints'.format(
        self.tender_id), {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.test_author}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    complaint = response.json['data']
    owner_token = response.json['access']['token']

    # Once the tender is complete no further complaint updates are allowed.
    self.set_status('complete')

    response = self.app.patch_json(
        '/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token),
        {"data": {
            "status": "claim",
        }}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update complaint in current (complete) tender status")


def review_tender_complaint(self):
    """Reviewer workflow: for each terminal review status, a pending
    complaint gets a decision (and, for satisfied/declined, an intermediate
    'accepted' step) before being moved to that status.
    """
    for status in ['invalid', 'stopped', 'satisfied', 'declined']:
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), {'data': {
            'title': 'complaint title',
            'description': 'complaint description',
            'author': self.test_author,
            'status': 'pending'
        }})
        self.assertEqual(response.status, '201 Created')
        self.assertEqual(response.content_type, 'application/json')
        complaint = response.json['data']

        # Only the reviewer role may record decisions.
        self.app.authorization = ('Basic', ('reviewer', ''))
        response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint['id']), {"data": {
            "decision": '{} complaint'.format(status),
            'rejectReasonDescription': 'reject reason'
        }})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data']["decision"], '{} complaint'.format(status))
        self.assertEqual(response.json['data']["rejectReasonDescription"], 'reject reason')

        if status in ['satisfied', 'declined']:
            # These outcomes require the complaint to be accepted first.
            response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint['id']), {"data": {
                "status": "accepted"
            }})
            self.assertEqual(response.status, '200 OK')
            self.assertEqual(response.content_type, 'application/json')
            self.assertEqual(response.json['data']["status"], "accepted")

            response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint['id']), {"data": {
                "decision": 'accepted:{} complaint'.format(status)
            }})
            self.assertEqual(response.status, '200 OK')
            self.assertEqual(response.content_type, 'application/json')
            self.assertEqual(response.json['data']["decision"], 'accepted:{} complaint'.format(status))

        response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint['id']), {"data": {
            "status": status
        }})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data']["status"], status)


def review_tender_stopping_complaint(self):
    """Complainant moves a pending complaint to 'stopping'; the reviewer can
    then close it with a decision in any of the terminal statuses.
    """
    for status in ['satisfied', 'stopped', 'declined', 'mistaken', 'invalid']:
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), {
            'data': {
                'title': 'complaint title',
                'description': 'complaint description',
                'author': self.test_author,
                'status': 'pending',
            }
        })
        self.assertEqual(response.status, '201 Created')
        self.assertEqual(response.content_type, 'application/json')
        complaint = response.json['data']
        owner_token = response.json['access']['token']
        url_patch_complaint = '/tenders/{}/complaints/{}'.format(self.tender_id, complaint['id'])

        response = self.app.patch_json('{}?acc_token={}'.format(url_patch_complaint, owner_token), {
            'data': {
                'status': 'stopping',
                'cancellationReason': 'reason',
            }
        })
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data']['status'], 'stopping')
        self.assertEqual(response.json['data']['cancellationReason'], 'reason')

        self.app.authorization = ('Basic', ('reviewer', ''))
        response = self.app.patch_json(url_patch_complaint, {
            'data': {
                'decision': 'decision',
                'status': status,
            }
        })
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data']['status'], status)
        self.assertEqual(response.json['data']['decision'], 'decision')


# TenderLotAwardComplaintResourceTest


def create_tender_lot_complaint(self):
    """Same claim lifecycle as create_tender_complaint, but for a complaint
    bound to a specific lot via relatedLot.
    """
    response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), {'data': {
        'title': 'complaint title',
        'description': 'complaint description',
        'author': self.test_author,
        'relatedLot': self.initial_lots[0]['id'],
        'status': 'claim'
    }})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    complaint = response.json['data']
    owner_token = response.json['access']['token']
    self.assertEqual(complaint['author']['name'], self.test_author['name'])
    self.assertIn('id', complaint)
    self.assertIn(complaint['id'], response.headers['Location'])

    # Answering without a resolutionType must be rejected.
    response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], self.tender_token), {"data": {
        "status": "answered"
    }}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [
        {u'description': [u'This field is required.'], u'location': u'body', u'name': u'resolutionType'},
    ])

    response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], self.tender_token), {"data": {
        "status": "answered",
        "resolutionType": "invalid",
        "resolution": "spam 100% " * 3
    }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["status"], "answered")
    self.assertEqual(response.json['data']["resolutionType"], "invalid")
    self.assertEqual(response.json['data']["resolution"], "spam 100% " * 3)

    response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token), {"data": {
        "satisfied": True,
        "status": "resolved"
    }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["status"], "resolved")

    # A resolved complaint is final: cancellation is forbidden.
    response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], owner_token), {"data": {"status": "cancelled", "cancellationReason": "reason"}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update complaint in current (resolved) status")

    self.set_status('unsuccessful')

    response = self.app.post_json('/tenders/{}/complaints'.format(
        self.tender_id), {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.test_author}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't add complaint in current (unsuccessful) tender status")


# TenderComplaintDocumentResourceTest


def put_tender_complaint_document(self):
    """PUT (replace) a complaint document: only its author may replace it,
    both multipart and raw-body uploads work, and replacement is blocked once
    the tender is complete.
    """
    response = self.app.post('/tenders/{}/complaints/{}/documents?acc_token={}'.format(
        self.tender_id, self.complaint_id, self.complaint_owner_token),
        upload_files=[('file', 'name.doc', 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])

    # Wrong multipart field name -> structured 404 on 'file'.
    response = self.app.put('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(
        self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token),
        status=404,
        upload_files=[('invalid_name', 'name.doc', 'content')])
    self.assertEqual(response.status, '404 Not Found')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Not Found', u'location': u'body', u'name': u'file'}
    ])

    # The tender owner is not the document author and may not replace it.
    response = self.app.put('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(
        self.tender_id, self.complaint_id, doc_id, self.tender_token),
        upload_files=[('file', 'name.doc', 'content2')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can update document only author")

    response = self.app.put('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(
        self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token),
        upload_files=[('file', 'name.doc', 'content2')])
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    key = response.json["data"]["url"].split('?')[-1]

    response = self.app.get('/tenders/{}/complaints/{}/documents/{}?{}'.format(
        self.tender_id, self.complaint_id, doc_id, key))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/msword')
    self.assertEqual(response.content_length, 8)
    self.assertEqual(response.body, 'content2')

    response = self.app.get('/tenders/{}/complaints/{}/documents/{}'.format(
        self.tender_id, self.complaint_id, doc_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertEqual('name.doc', response.json["data"]["title"])

    # Raw-body PUT (no multipart) also replaces the content.
    response = self.app.put('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(
        self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token),
        'content3', content_type='application/msword')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    key = response.json["data"]["url"].split('?')[-1]

    response = self.app.get('/tenders/{}/complaints/{}/documents/{}?{}'.format(
        self.tender_id, self.complaint_id, doc_id, key))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/msword')
    self.assertEqual(response.content_length, 8)
    self.assertEqual(response.body, 'content3')

    response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, self.complaint_id, self.complaint_owner_token), {"data": {
        "status": "pending",
    }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']["status"], "pending")

    response = self.app.put('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token), 'content', content_type='application/msword')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')

    # Once the tender is complete, documents are frozen.
    self.set_status('complete')

    response = self.app.put('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(
        self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token),
        upload_files=[('file', 'name.doc', 'content3')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update document in current (complete) tender status")


def patch_tender_complaint_document(self):
    """PATCH a complaint document's metadata: author-only access, metadata
    round-trip, and the complete-tender freeze.
    """
    response = self.app.post('/tenders/{}/complaints/{}/documents?acc_token={}'.format(
        self.tender_id, self.complaint_id, self.complaint_owner_token),
        upload_files=[('file', 'name.doc', 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])

    # The tender owner is not the document author and may not patch it.
    response = self.app.patch_json('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(
        self.tender_id, self.complaint_id, doc_id, self.tender_token),
        {"data": {"description": "document description"}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can update document only author")

    response = self.app.patch_json('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token), {"data": {"description": "document description"}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])

    response = self.app.get('/tenders/{}/complaints/{}/documents/{}'.format(
        self.tender_id, self.complaint_id, doc_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertEqual('document description', response.json["data"]["description"])

    response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, self.complaint_id, self.complaint_owner_token), {"data": {
        "status": "pending",
    }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']["status"], "pending")

    response = self.app.patch_json('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token), {"data": {"description": "document description2"}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["description"], "document description2")

    # Once the tender is complete, document metadata is frozen too.
    self.set_status('complete')

    response = self.app.patch_json('/tenders/{}/complaints/{}/documents/{}?acc_token={}'.format(self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token), {"data": {"description": "document description"}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update document in current (complete) tender status")
# yellowbrick.model_selection.validation_curve
# Implements a visual validation curve for a hyperparameter.
#
# Author:  Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sat Mar 31 06:27:28 2018 -0400
#
# ID: validation_curve.py [] benjamin@bengfort.com $

"""
Implements a visual validation curve for a hyperparameter.
"""

##########################################################################
## Imports
##########################################################################

import numpy as np

from yellowbrick.base import ModelVisualizer
from yellowbrick.style import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError

from sklearn.model_selection import validation_curve as sk_validation_curve


##########################################################################
## ValidationCurve visualizer
##########################################################################

class ValidationCurve(ModelVisualizer):
    """
    Visualizes the validation curve for both test and training data for a
    range of values for a single hyperparameter of the model.

    Adjusting the value of a hyperparameter adjusts the complexity of a
    model: less complex models suffer from increased error due to bias,
    more complex models from increased error due to variance. Inspecting
    the training and cross-validated test scores helps estimate a value
    that balances the bias/variance trade-off. The x-axis is the value of
    the hyperparameter, the y-axis is the model score; the filled area
    shows one standard deviation above and below the mean for each split.

    Parameters
    ----------
    model : a scikit-learn estimator
        An object that implements ``fit`` and ``predict``, can be a
        classifier, regressor, or clusterer so long as there is also a valid
        associated scoring metric. The object is cloned for each validation.

    param_name : string
        Name of the parameter that will be varied.

    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.

    ax : matplotlib.Axes object, optional
        The axes object to plot the figure on.

    logx : boolean, optional
        If True, plots the x-axis with a logarithmic scale.

    groups : array-like, with shape (n_samples,)
        Optional group labels for the samples used while splitting the
        dataset into train/test sets.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy; see the
        scikit-learn cross-validation guide for the possible inputs
        (None, an integer fold count, a CV generator, or an iterable of
        train/test splits).

    scoring : string, callable or None, optional, default: None
        A string or scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The string can be an expression like '2*n_jobs'.

    kwargs : dict
        Keyword arguments that are passed to the base class and may
        influence the visualization as defined in other Visualizers.

    Attributes
    ----------
    train_scores_ : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.

    train_scores_mean_ : array, shape (n_ticks,)
        Mean training data scores for each training split

    train_scores_std_ : array, shape (n_ticks,)
        Standard deviation of training data scores for each training split

    test_scores_ : array, shape (n_ticks, n_cv_folds)
        Scores on test set.

    test_scores_mean_ : array, shape (n_ticks,)
        Mean test data scores for each test split

    test_scores_std_ : array, shape (n_ticks,)
        Standard deviation of test data scores for each test split

    Examples
    --------
    >>> import numpy as np
    >>> from yellowbrick.model_selection import ValidationCurve
    >>> from sklearn.svm import SVC
    >>> pr = np.logspace(-6,-1,5)
    >>> model = ValidationCurve(SVC(), param_name="gamma", param_range=pr)
    >>> model.fit(X, y)
    >>> model.poof()

    Notes
    -----
    This visualizer is essentially a wrapper for the
    ``sklearn.model_selection.validation_curve`` utility, discussed in the
    `validation curves <http://scikit-learn.org/stable/modules/learning_curve.html#validation-curve>`_
    documentation.
    """

    def __init__(self, model, param_name, param_range, ax=None, logx=False,
                 groups=None, cv=None, scoring=None, n_jobs=1,
                 pre_dispatch="all", **kwargs):

        # Initialize the model visualizer
        super(ValidationCurve, self).__init__(model, ax=ax, **kwargs)

        # Validate the param_range: it must be a one dimensional array
        param_range = np.asarray(param_range)
        if param_range.ndim != 1:
            raise YellowbrickValueError(
                "must specify array of param values, '{}' is not valid".format(
                    repr(param_range)
                ))

        # Set the visual and validation curve parameters on the estimator
        self.set_params(
            param_name=param_name, param_range=param_range, logx=logx,
            groups=groups, cv=cv, scoring=scoring, n_jobs=n_jobs,
            pre_dispatch=pre_dispatch,
        )

    def fit(self, X, y=None):
        """
        Fits the validation curve with the wrapped estimator and parameter
        array to the specified data. Draws training and test score curves
        and saves the scores to the visualizer.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples) or (n_samples, n_features), optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        Returns
        -------
        self : instance
            Returns the instance of the validation curve visualizer for use
            in pipelines and other sequential transformers.
        """
        # Collect the parameters once rather than calling get_params() for
        # every key inside the comprehension (it was invoked 7 times before).
        params = self.get_params()
        skvc_kwargs = {
            key: params[key]
            for key in (
                'param_name', 'param_range', 'groups', 'cv', 'scoring',
                'n_jobs', 'pre_dispatch',
            )
        }

        # compute the validation curve and store scores
        curve = sk_validation_curve(self.estimator, X, y, **skvc_kwargs)
        self.train_scores_, self.test_scores_ = curve

        # compute the mean and standard deviation of the training data
        self.train_scores_mean_ = np.mean(self.train_scores_, axis=1)
        self.train_scores_std_ = np.std(self.train_scores_, axis=1)

        # compute the mean and standard deviation of the test data
        self.test_scores_mean_ = np.mean(self.test_scores_, axis=1)
        self.test_scores_std_ = np.std(self.test_scores_, axis=1)

        # draw the curves on the current axes
        self.draw()
        return self

    def draw(self, **kwargs):
        """
        Renders the training and test curves.
        """
        # Specify the curves to draw and their labels
        labels = ("Training Score", "Cross Validation Score")
        curves = (
            (self.train_scores_mean_, self.train_scores_std_),
            (self.test_scores_mean_, self.test_scores_std_),
        )

        # Get the colors for the train and test curves
        colors = resolve_colors(n_colors=2)

        # Plot the fill betweens first so they are behind the curves.
        for idx, (mean, std) in enumerate(curves):
            # Plot one standard deviation above and below the mean
            self.ax.fill_between(
                self.param_range, mean - std, mean + std,
                alpha=0.25, color=colors[idx],
            )

        # Plot the mean curves so they are in front of the variance fill
        for idx, (mean, _) in enumerate(curves):
            self.ax.plot(
                self.param_range, mean, 'd-',
                color=colors[idx], label=labels[idx],
            )

        if self.logx:
            self.ax.set_xscale('log')

        return self.ax

    def finalize(self, **kwargs):
        """
        Add the title, legend, and other visual final touches to the plot.
        """
        # Set the title of the figure
        self.set_title('Validation Curve for {}'.format(self.name))

        # Add the legend
        self.ax.legend(frameon=True, loc='best')

        # Set the axis labels
        self.ax.set_xlabel(self.param_name)
        self.ax.set_ylabel('score')


##########################################################################
## Quick Method
##########################################################################

def validation_curve(model, X, y, param_name, param_range, ax=None,
                     logx=False, groups=None, cv=None, scoring=None,
                     n_jobs=1, pre_dispatch="all", **kwargs):
    """
    Displays a validation curve for the specified param and values, plotting
    both the train and cross-validated test scores. The validation curve is
    a visual, single-parameter grid search used to tune a model to find the
    best balance between error due to bias and error due to variance.

    This helper function is a wrapper to use the ValidationCurve in a fast,
    visual analysis.

    Parameters
    ----------
    model : a scikit-learn estimator
        An object that implements ``fit`` and ``predict``, can be a
        classifier, regressor, or clusterer so long as there is also a valid
        associated scoring metric. The object is cloned for each validation.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    param_name : string
        Name of the parameter that will be varied.

    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.

    ax : matplotlib.Axes object, optional
        The axes object to plot the figure on.

    logx : boolean, optional
        If True, plots the x-axis with a logarithmic scale.

    groups : array-like, with shape (n_samples,)
        Optional group labels for the samples used while splitting the
        dataset into train/test sets.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy; see the
        scikit-learn cross-validation guide for the possible inputs.

    scoring : string, callable or None, optional, default: None
        A string or scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The string can be an expression like '2*n_jobs'.

    kwargs : dict
        Keyword arguments that are passed to the base class and may
        influence the visualization as defined in other Visualizers. These
        arguments are also passed to the `poof()` method, e.g. can pass a
        path to save the figure to.

    Returns
    -------
    ax : matplotlib.Axes
        The axes object that the validation curves were drawn on.
    """
    # Initialize the visualizer, forwarding **kwargs so that visual
    # arguments (e.g. size, title) actually reach the visualizer as the
    # docstring promises; previously they were only passed to poof() and
    # silently dropped here.
    oz = ValidationCurve(
        model, param_name, param_range, ax=ax, logx=logx, groups=groups,
        cv=cv, scoring=scoring, n_jobs=n_jobs, pre_dispatch=pre_dispatch,
        **kwargs
    )

    # Fit and poof the visualizer
    oz.fit(X, y)
    oz.poof(**kwargs)
    return oz.ax
# Copyright (c) 2010 Cloud.com, Inc # Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for basic VM operations. """ import contextlib import functools import os import time from eventlet import timeout as etimeout from os_win import constants as os_win_const from os_win import exceptions as os_win_exc from os_win import utilsfactory from oslo_concurrency import processutils from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import units from oslo_utils import uuidutils from nova.api.metadata import base as instance_metadata from nova.compute import vm_states import nova.conf from nova import exception from nova.i18n import _ from nova import objects from nova.objects import fields from nova.virt import configdrive from nova.virt import hardware from nova.virt.hyperv import block_device_manager from nova.virt.hyperv import constants from nova.virt.hyperv import imagecache from nova.virt.hyperv import pathutils from nova.virt.hyperv import serialconsoleops from nova.virt.hyperv import vif as vif_utils from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) CONF = nova.conf.CONF SHUTDOWN_TIME_INCREMENT = 5 REBOOT_TYPE_SOFT = 'SOFT' REBOOT_TYPE_HARD = 'HARD' VM_GENERATIONS = { constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2 } 
VM_GENERATIONS_CONTROLLER_TYPES = { constants.VM_GEN_1: constants.CTRL_TYPE_IDE, constants.VM_GEN_2: constants.CTRL_TYPE_SCSI } def check_admin_permissions(function): @functools.wraps(function) def wrapper(self, *args, **kwds): # Make sure the windows account has the required admin permissions. self._vmutils.check_admin_permissions() return function(self, *args, **kwds) return wrapper class VMOps(object): # The console log is stored in two files, each should have at most half of # the maximum console log size. _MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2 _ROOT_DISK_CTRL_ADDR = 0 def __init__(self, virtapi=None): self._virtapi = virtapi self._vmutils = utilsfactory.get_vmutils() self._metricsutils = utilsfactory.get_metricsutils() self._vhdutils = utilsfactory.get_vhdutils() self._hostutils = utilsfactory.get_hostutils() self._migrutils = utilsfactory.get_migrationutils() self._pathutils = pathutils.PathUtils() self._volumeops = volumeops.VolumeOps() self._imagecache = imagecache.ImageCache() self._serial_console_ops = serialconsoleops.SerialConsoleOps() self._block_dev_man = ( block_device_manager.BlockDeviceInfoManager()) self._vif_driver = vif_utils.HyperVVIFDriver() def list_instance_uuids(self): instance_uuids = [] for (instance_name, notes) in self._vmutils.list_instance_notes(): if notes and uuidutils.is_uuid_like(notes[0]): instance_uuids.append(str(notes[0])) else: LOG.debug("Notes not found or not resembling a GUID for " "instance: %s", instance_name) return instance_uuids def list_instances(self): return self._vmutils.list_instances() def get_info(self, instance): """Get information about the VM.""" LOG.debug("get_info called for instance", instance=instance) instance_name = instance.name if not self._vmutils.vm_exists(instance_name): raise exception.InstanceNotFound(instance_id=instance.uuid) info = self._vmutils.get_vm_summary_info(instance_name) state = constants.HYPERV_POWER_STATE[info['EnabledState']] return hardware.InstanceInfo(state=state) def 
_create_root_device(self, context, instance, root_disk_info, vm_gen): path = None if root_disk_info['type'] == constants.DISK: path = self._create_root_vhd(context, instance) self.check_vm_image_type(instance.uuid, vm_gen, path) root_disk_info['path'] = path def _create_root_vhd(self, context, instance, rescue_image_id=None): is_rescue_vhd = rescue_image_id is not None base_vhd_path = self._imagecache.get_cached_image(context, instance, rescue_image_id) base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path) base_vhd_size = base_vhd_info['VirtualSize'] format_ext = base_vhd_path.split('.')[-1] root_vhd_path = self._pathutils.get_root_vhd_path(instance.name, format_ext, is_rescue_vhd) root_vhd_size = instance.flavor.root_gb * units.Gi try: if CONF.use_cow_images: LOG.debug("Creating differencing VHD. Parent: " "%(base_vhd_path)s, Target: %(root_vhd_path)s", {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) self._vhdutils.create_differencing_vhd(root_vhd_path, base_vhd_path) vhd_type = self._vhdutils.get_vhd_format(base_vhd_path) if vhd_type == constants.DISK_FORMAT_VHD: # The base image has already been resized. As differencing # vhdx images support it, the root image will be resized # instead if needed. 
return root_vhd_path else: LOG.debug("Copying VHD image %(base_vhd_path)s to target: " "%(root_vhd_path)s", {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) self._pathutils.copyfile(base_vhd_path, root_vhd_path) root_vhd_internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( base_vhd_path, root_vhd_size)) if not is_rescue_vhd and self._is_resize_needed( root_vhd_path, base_vhd_size, root_vhd_internal_size, instance): self._vhdutils.resize_vhd(root_vhd_path, root_vhd_internal_size, is_file_max_size=False) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(root_vhd_path): self._pathutils.remove(root_vhd_path) return root_vhd_path def _is_resize_needed(self, vhd_path, old_size, new_size, instance): if new_size < old_size: raise exception.FlavorDiskSmallerThanImage( flavor_size=new_size, image_size=old_size) elif new_size > old_size: LOG.debug("Resizing VHD %(vhd_path)s to new " "size %(new_size)s", {'new_size': new_size, 'vhd_path': vhd_path}, instance=instance) return True return False def _create_ephemerals(self, instance, ephemerals): for index, eph in enumerate(ephemerals): eph['format'] = self._vhdutils.get_best_supported_vhd_format() eph_name = "eph%s" % index eph['path'] = self._pathutils.get_ephemeral_vhd_path( instance.name, eph['format'], eph_name) self.create_ephemeral_disk(instance.name, eph) def create_ephemeral_disk(self, instance_name, eph_info): self._vhdutils.create_dynamic_vhd(eph_info['path'], eph_info['size'] * units.Gi) @staticmethod def _get_vif_metadata(context, instance_id): vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context, instance_id) vif_metadata = [] for vif in vifs: if 'tag' in vif and vif.tag: device = objects.NetworkInterfaceMetadata( mac=vif.address, bus=objects.PCIDeviceBus(), tags=[vif.tag]) vif_metadata.append(device) return vif_metadata def _save_device_metadata(self, context, instance, block_device_info): """Builds a metadata 
object for instance devices, that maps the user provided tag to the hypervisor assigned device address. """ metadata = [] metadata.extend(self._get_vif_metadata(context, instance.uuid)) if block_device_info: metadata.extend(self._block_dev_man.get_bdm_metadata( context, instance, block_device_info)) if metadata: instance.device_metadata = objects.InstanceDeviceMetadata( devices=metadata) def set_boot_order(self, instance_name, vm_gen, block_device_info): boot_order = self._block_dev_man.get_boot_order( vm_gen, block_device_info) LOG.debug("Setting boot order for instance: %(instance_name)s: " "%(boot_order)s", {'instance_name': instance_name, 'boot_order': boot_order}) self._vmutils.set_boot_order(instance_name, boot_order) @check_admin_permissions def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None): """Create a new VM and start it.""" LOG.info("Spawning new instance", instance=instance) instance_name = instance.name if self._vmutils.vm_exists(instance_name): raise exception.InstanceExists(name=instance_name) # Make sure we're starting with a clean slate. self._delete_disk_files(instance_name) vm_gen = self.get_image_vm_generation(instance.uuid, image_meta) self._block_dev_man.validate_and_update_bdi( instance, image_meta, vm_gen, block_device_info) root_device = block_device_info['root_disk'] self._create_root_device(context, instance, root_device, vm_gen) self._create_ephemerals(instance, block_device_info['ephemerals']) try: with self.wait_vif_plug_events(instance, network_info): # waiting will occur after the instance is created. 
self.create_instance(instance, network_info, root_device, block_device_info, vm_gen, image_meta) # This is supported starting from OVS version 2.5 self.plug_vifs(instance, network_info) self._save_device_metadata(context, instance, block_device_info) if configdrive.required_by(instance): configdrive_path = self._create_config_drive(context, instance, injected_files, admin_password, network_info) self.attach_config_drive(instance, configdrive_path, vm_gen) self.set_boot_order(instance.name, vm_gen, block_device_info) # vifs are already plugged in at this point. We waited on the vif # plug event previously when we created the instance. Skip the # plug vifs during power on in this case self.power_on(instance, network_info=network_info, should_plug_vifs=False) except Exception: with excutils.save_and_reraise_exception(): self.destroy(instance, network_info, block_device_info) @contextlib.contextmanager def wait_vif_plug_events(self, instance, network_info): timeout = CONF.vif_plugging_timeout try: # NOTE(claudiub): async calls to bind the neutron ports will be # done when network_info is being accessed. events = self._get_neutron_events(network_info) with self._virtapi.wait_for_instance_event( instance, events, deadline=timeout, error_callback=self._neutron_failed_callback): yield except etimeout.Timeout: # We never heard from Neutron LOG.warning('Timeout waiting for vif plugging callback for ' 'instance.', instance=instance) if CONF.vif_plugging_is_fatal: raise exception.VirtualInterfaceCreateException() except exception.PortBindingFailed: LOG.warning( "Neutron failed to bind a port to this host. Make sure that " "an L2 agent is alive and registered from this node (neutron " "Open vSwitch agent or Hyper-V agent), or make sure that " "neutron is configured with a mechanism driver that is able " "to bind ports to this host (OVN). 
If you are using neutron " "Hyper-V agent, make sure that networking-hyperv is installed " "on the neutron controller, and that the neutron-server was " "configured to use the 'hyperv' mechanism_driver.") raise def _neutron_failed_callback(self, event_name, instance): LOG.error('Neutron Reported failure on event %s', event_name, instance=instance) if CONF.vif_plugging_is_fatal: raise exception.VirtualInterfaceCreateException() def _get_neutron_events(self, network_info): # NOTE(danms): We need to collect any VIFs that are currently # down that we expect a down->up event for. Anything that is # already up will not undergo that transition, and for # anything that might be stale (cache-wise) assume it's # already up so we don't block on it. if CONF.vif_plugging_timeout: return [('network-vif-plugged', vif['id']) for vif in network_info if vif.get('active') is False] return [] def create_instance(self, instance, network_info, root_device, block_device_info, vm_gen, image_meta): instance_name = instance.name instance_path = os.path.join(CONF.instances_path, instance_name) secure_boot_enabled = self._requires_secure_boot(instance, image_meta, vm_gen) memory_per_numa_node, cpus_per_numa_node = ( self._get_instance_vnuma_config(instance, image_meta)) if memory_per_numa_node: LOG.debug("Instance requires vNUMA topology. Host's NUMA spanning " "has to be disabled in order for the instance to " "benefit from it.", instance=instance) if CONF.hyperv.dynamic_memory_ratio > 1.0: LOG.warning( "Instance vNUMA topology requested, but dynamic memory " "ratio is higher than 1.0 in nova.conf. Ignoring dynamic " "memory ratio option.", instance=instance) dynamic_memory_ratio = 1.0 vnuma_enabled = True else: dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio vnuma_enabled = False if instance.pci_requests.requests: # NOTE(claudiub): if the instance requires PCI devices, its # host shutdown action MUST be shutdown. 
host_shutdown_action = os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN else: host_shutdown_action = None self._vmutils.create_vm(instance_name, vnuma_enabled, vm_gen, instance_path, [instance.uuid]) self._vmutils.update_vm(instance_name, instance.flavor.memory_mb, memory_per_numa_node, instance.flavor.vcpus, cpus_per_numa_node, CONF.hyperv.limit_cpu_features, dynamic_memory_ratio, host_shutdown_action=host_shutdown_action) self._configure_remotefx(instance, vm_gen) self._vmutils.create_scsi_controller(instance_name) self._attach_root_device(instance_name, root_device) self._attach_ephemerals(instance_name, block_device_info['ephemerals']) self._volumeops.attach_volumes( block_device_info['block_device_mapping'], instance_name) # For the moment, we use COM port 1 when getting the serial console # log as well as interactive sessions. In the future, the way in which # we consume instance serial ports may become configurable. # # Note that Hyper-V instances will always have 2 COM ports serial_ports = { constants.DEFAULT_SERIAL_CONSOLE_PORT: constants.SERIAL_PORT_TYPE_RW} self._create_vm_com_port_pipes(instance, serial_ports) for vif in network_info: LOG.debug('Creating nic for instance', instance=instance) self._vmutils.create_nic(instance_name, vif['id'], vif['address']) if CONF.hyperv.enable_instance_metrics_collection: self._metricsutils.enable_vm_metrics_collection(instance_name) self._set_instance_disk_qos_specs(instance) if secure_boot_enabled: certificate_required = self._requires_certificate(image_meta) self._vmutils.enable_secure_boot( instance.name, msft_ca_required=certificate_required) self._attach_pci_devices(instance) def _attach_pci_devices(self, instance): for pci_request in instance.pci_requests.requests: spec = pci_request.spec[0] for counter in range(pci_request.count): self._vmutils.add_pci_device(instance.name, spec['vendor_id'], spec['product_id']) def _get_instance_vnuma_config(self, instance, image_meta): """Returns the appropriate NUMA configuration 
for Hyper-V instances, given the desired instance NUMA topology. :param instance: instance containing the flavor and it's extra_specs, where the NUMA topology is defined. :param image_meta: image's metadata, containing properties related to the instance's NUMA topology. :returns: memory amount and number of vCPUs per NUMA node or (None, None), if instance NUMA topology was not requested. :raises exception.InstanceUnacceptable: If the given instance NUMA topology is not possible on Hyper-V, or if CPU pinning is required. """ instance_topology = hardware.numa_get_constraints(instance.flavor, image_meta) if not instance_topology: # instance NUMA topology was not requested. return None, None memory_per_numa_node = instance_topology.cells[0].memory cpus_per_numa_node = len(instance_topology.cells[0].cpuset) if instance_topology.cpu_pinning_requested: raise exception.InstanceUnacceptable( reason=_("Hyper-V does not support CPU pinning."), instance_id=instance.uuid) # validate that the requested NUMA topology is not asymetric. # e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y. # same with memory. for cell in instance_topology.cells: if len(cell.cpuset) != cpus_per_numa_node: reason = _("Hyper-V does not support NUMA topologies with " "uneven number of processors. (%(a)s != %(b)s)") % { 'a': len(cell.cpuset), 'b': cpus_per_numa_node} raise exception.InstanceUnacceptable(reason=reason, instance_id=instance.uuid) if cell.memory != memory_per_numa_node: reason = _("Hyper-V does not support NUMA topologies with " "uneven amounts of memory. (%(a)s != %(b)s)") % { 'a': cell.memory, 'b': memory_per_numa_node} raise exception.InstanceUnacceptable(reason=reason, instance_id=instance.uuid) return memory_per_numa_node, cpus_per_numa_node def _configure_remotefx(self, instance, vm_gen): extra_specs = instance.flavor.extra_specs remotefx_max_resolution = extra_specs.get( constants.FLAVOR_ESPEC_REMOTEFX_RES) if not remotefx_max_resolution: # RemoteFX not required. 
        # NOTE(review): this is the tail of _configure_remotefx(); the method
        # signature and the initial "RemoteFX not requested -> return" check
        # precede this view.  `extra_specs` / `remotefx_max_resolution` are
        # bound earlier in the method -- confirm against the full file.
        return

        if not CONF.hyperv.enable_remotefx:
            raise exception.InstanceUnacceptable(
                _("enable_remotefx configuration option needs to be set to "
                  "True in order to use RemoteFX."))

        if not self._hostutils.check_server_feature(
                self._hostutils.FEATURE_RDS_VIRTUALIZATION):
            raise exception.InstanceUnacceptable(
                _("The RDS-Virtualization feature must be installed in order "
                  "to use RemoteFX."))

        if not self._vmutils.vm_gen_supports_remotefx(vm_gen):
            raise exception.InstanceUnacceptable(
                _("RemoteFX is not supported on generation %s virtual "
                  "machines on this version of Windows.") % vm_gen)

        instance_name = instance.name
        LOG.debug('Configuring RemoteFX for instance: %s', instance_name)

        # Monitor count defaults to 1 when the extra spec is absent/empty.
        remotefx_monitor_count = int(extra_specs.get(
            constants.FLAVOR_ESPEC_REMOTEFX_MONITORS) or 1)
        remotefx_vram = extra_specs.get(
            constants.FLAVOR_ESPEC_REMOTEFX_VRAM)
        # VRAM extra spec is expressed in MB; None lets os-win pick a default.
        vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None

        self._vmutils.enable_remotefx_video_adapter(
            instance_name, remotefx_monitor_count, remotefx_max_resolution,
            vram_bytes)

    def _attach_root_device(self, instance_name, root_dev_info):
        # Root device is either a Cinder volume or a local disk image.
        if root_dev_info['type'] == constants.VOLUME:
            self._volumeops.attach_volume(root_dev_info['connection_info'],
                                          instance_name,
                                          disk_bus=root_dev_info['disk_bus'])
        else:
            self._attach_drive(instance_name, root_dev_info['path'],
                               root_dev_info['drive_addr'],
                               root_dev_info['ctrl_disk_addr'],
                               root_dev_info['disk_bus'],
                               root_dev_info['type'])

    def _attach_ephemerals(self, instance_name, ephemerals):
        for eph in ephemerals:
            # if an ephemeral doesn't have a path, it might have been removed
            # during resize.
            if eph.get('path'):
                self._attach_drive(
                    instance_name, eph['path'], eph['drive_addr'],
                    eph['ctrl_disk_addr'], eph['disk_bus'],
                    constants.BDI_DEVICE_TYPE_TO_DRIVE_TYPE[
                        eph['device_type']])

    def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr,
                      controller_type, drive_type=constants.DISK):
        # SCSI drives need no explicit controller/disk addressing; IDE does.
        if controller_type == constants.CTRL_TYPE_SCSI:
            self._vmutils.attach_scsi_drive(instance_name, path, drive_type)
        else:
            self._vmutils.attach_ide_drive(instance_name, path, drive_addr,
                                           ctrl_disk_addr, drive_type)

    def get_image_vm_generation(self, instance_id, image_meta):
        """Return the VM generation required by the image.

        Falls back to the host's default generation when the image does not
        set ``hw_machine_type``.

        :raises exception.InstanceUnacceptable: if the requested generation
            is not supported on this host OS.
        """
        default_vm_gen = self._hostutils.get_default_vm_generation()
        image_prop_vm = image_meta.properties.get('hw_machine_type',
                                                  default_vm_gen)
        if image_prop_vm not in self._hostutils.get_supported_vm_types():
            reason = _('Requested VM Generation %s is not supported on '
                       'this OS.') % image_prop_vm
            raise exception.InstanceUnacceptable(instance_id=instance_id,
                                                 reason=reason)

        return VM_GENERATIONS[image_prop_vm]

    def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path):
        # Generation 2 VMs cannot boot from VHD-format images, only VHDX.
        if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
                self._vhdutils.get_vhd_format(
                    root_vhd_path) == constants.DISK_FORMAT_VHD):
            reason = _('Requested VM Generation %s, but provided VHD '
                       'instead of VHDX.') % vm_gen
            raise exception.InstanceUnacceptable(instance_id=instance_id,
                                                 reason=reason)

    def _requires_certificate(self, image_meta):
        # Windows guests do not need a UEFI certificate; everything else does.
        os_type = image_meta.properties.get('os_type')
        if os_type == fields.OSType.WINDOWS:
            return False
        return True

    def _requires_secure_boot(self, instance, image_meta, vm_gen):
        """Checks whether the given instance requires Secure Boot.

        Secure Boot feature will be enabled by setting the "os_secure_boot"
        image property or the "os:secure_boot" flavor extra spec to required.

        :raises exception.InstanceUnacceptable: if the given image_meta has
            no os_type property set, or if the image property value and the
            flavor extra spec value are conflicting, or if Secure Boot is
            required, but the instance's VM generation is 1.
        """
        img_secure_boot = image_meta.properties.get('os_secure_boot')
        flavor_secure_boot = instance.flavor.extra_specs.get(
            constants.FLAVOR_SPEC_SECURE_BOOT)

        requires_sb = False
        conflicting_values = False

        # The flavor extra spec wins; a "disabled" value on the other side
        # is a conflict rather than an override.
        if flavor_secure_boot == fields.SecureBoot.REQUIRED:
            requires_sb = True
            if img_secure_boot == fields.SecureBoot.DISABLED:
                conflicting_values = True
        elif img_secure_boot == fields.SecureBoot.REQUIRED:
            requires_sb = True
            if flavor_secure_boot == fields.SecureBoot.DISABLED:
                conflicting_values = True

        if conflicting_values:
            reason = _(
                "Conflicting image metadata property and flavor extra_specs "
                "values: os_secure_boot (%(image_secure_boot)s) / "
                "os:secure_boot (%(flavor_secure_boot)s)") % {
                    'image_secure_boot': img_secure_boot,
                    'flavor_secure_boot': flavor_secure_boot}
            raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                                 reason=reason)

        if requires_sb:
            # Secure Boot is a UEFI (generation 2) feature.
            if vm_gen != constants.VM_GEN_2:
                reason = _('Secure boot requires generation 2 VM.')
                raise exception.InstanceUnacceptable(
                    instance_id=instance.uuid, reason=reason)

            os_type = image_meta.properties.get('os_type')
            if not os_type:
                reason = _('For secure boot, os_type must be specified in '
                           'image properties.')
                raise exception.InstanceUnacceptable(
                    instance_id=instance.uuid, reason=reason)
        return requires_sb

    def _create_config_drive(self, context, instance, injected_files,
                             admin_password, network_info, rescue=False):
        """Build a config drive ISO (optionally converted to VHD).

        :returns: path of the created config drive image.
        :raises exception.ConfigDriveUnsupportedFormat: for any configured
            format other than iso9660.
        """
        if CONF.config_drive_format != 'iso9660':
            raise exception.ConfigDriveUnsupportedFormat(
                format=CONF.config_drive_format)

        LOG.info('Using config drive for instance', instance=instance)

        extra_md = {}
        if admin_password and CONF.hyperv.config_drive_inject_password:
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(
            instance, content=injected_files, extra_md=extra_md,
            network_info=network_info, request_context=context)

        configdrive_path_iso = self._pathutils.get_configdrive_path(
            instance.name, constants.DVD_FORMAT, rescue=rescue)
        LOG.info('Creating config drive at %(path)s',
                 {'path': configdrive_path_iso}, instance=instance)

        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(configdrive_path_iso)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error('Creating config drive failed with '
                              'error: %s', e, instance=instance)

        if not CONF.hyperv.config_drive_cdrom:
            # Attach as a disk rather than a DVD: convert the raw ISO to VHD
            # with qemu-img, then drop the intermediate ISO.
            configdrive_path = self._pathutils.get_configdrive_path(
                instance.name, constants.DISK_FORMAT_VHD, rescue=rescue)
            processutils.execute(CONF.hyperv.qemu_img_cmd,
                                 'convert', '-f', 'raw', '-O', 'vpc',
                                 configdrive_path_iso, configdrive_path,
                                 attempts=1)
            self._pathutils.remove(configdrive_path_iso)
        else:
            configdrive_path = configdrive_path_iso

        return configdrive_path

    def attach_config_drive(self, instance, configdrive_path, vm_gen):
        configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):]
        # Do the attach here and if there is a certain file format that isn't
        # supported in constants.DISK_FORMAT_MAP then bomb out.
        try:
            drive_type = constants.DISK_FORMAT_MAP[configdrive_ext]
            controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
            self._attach_drive(instance.name, configdrive_path, 1, 0,
                               controller_type, drive_type)
        except KeyError:
            raise exception.InvalidDiskFormat(disk_format=configdrive_ext)

    def _detach_config_drive(self, instance_name, rescue=False, delete=False):
        configdrive_path = self._pathutils.lookup_configdrive_path(
            instance_name, rescue=rescue)

        if configdrive_path:
            self._vmutils.detach_vm_disk(instance_name,
                                         configdrive_path,
                                         is_physical=False)
            if delete:
                self._pathutils.remove(configdrive_path)

    @serialconsoleops.instance_synchronized
    def _delete_disk_files(self, instance_name):
        # We want to avoid the situation in which serial console workers
        # are started while we perform this operation, preventing us from
        # deleting the instance log files (bug #1556189). This can happen
        # due to delayed instance lifecycle events.
        #
        # The unsynchronized method is being used to avoid a deadlock.
        self._serial_console_ops.stop_console_handler_unsync(instance_name)
        self._pathutils.get_instance_dir(instance_name,
                                         create_dir=False,
                                         remove_dir=True)

    def destroy(self, instance, network_info, block_device_info,
                destroy_disks=True):
        """Destroy the instance's VM, vifs, volumes and (optionally) disks."""
        instance_name = instance.name
        LOG.info("Got request to destroy instance", instance=instance)
        try:
            if self._vmutils.vm_exists(instance_name):
                # Stop the VM first.
                self._vmutils.stop_vm_jobs(instance_name)
                self.power_off(instance)
                self._vmutils.destroy_vm(instance_name)
            elif self._migrutils.planned_vm_exists(instance_name):
                self._migrutils.destroy_existing_planned_vm(instance_name)
            else:
                LOG.debug("Instance not found", instance=instance)

            # NOTE(claudiub): The vifs should be unplugged and the volumes
            # should be disconnected even if the VM doesn't exist anymore,
            # so they are not leaked.
            self.unplug_vifs(instance, network_info)
            self._volumeops.disconnect_volumes(block_device_info)

            if destroy_disks:
                self._delete_disk_files(instance_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_('Failed to destroy instance: %s'),
                              instance_name)

    def reboot(self, instance, network_info, reboot_type):
        """Reboot the specified instance."""
        LOG.debug("Rebooting instance", instance=instance)

        if reboot_type == REBOOT_TYPE_SOFT:
            if self._soft_shutdown(instance):
                self.power_on(instance, network_info=network_info)
                return

        # Soft reboot failed or a hard reboot was requested.
        self._set_vm_state(instance,
                           os_win_const.HYPERV_VM_STATE_REBOOT)

    def _soft_shutdown(self, instance,
                       timeout=CONF.hyperv.wait_soft_reboot_seconds,
                       retry_interval=SHUTDOWN_TIME_INCREMENT):
        """Perform a soft shutdown on the VM.

        :return: True if the instance was shutdown within time limit,
                 False otherwise.
        """
        # NOTE(review): the CONF-based default is evaluated once at import
        # time; later config reloads will not be picked up -- confirm this
        # is intentional.
        LOG.debug("Performing Soft shutdown on instance", instance=instance)

        while timeout > 0:
            # Perform a soft shutdown on the instance.
            # Wait maximum timeout for the instance to be shutdown.
            # If it was not shutdown, retry until it succeeds or a maximum of
            # time waited is equal to timeout.
            wait_time = min(retry_interval, timeout)
            try:
                LOG.debug("Soft shutdown instance, timeout remaining: %d",
                          timeout, instance=instance)
                self._vmutils.soft_shutdown_vm(instance.name)
                if self._wait_for_power_off(instance.name, wait_time):
                    LOG.info("Soft shutdown succeeded.", instance=instance)
                    return True
            except os_win_exc.HyperVException as e:
                # Exception is raised when trying to shutdown the instance
                # while it is still booting.
                LOG.debug("Soft shutdown failed: %s", e, instance=instance)
                time.sleep(wait_time)

            timeout -= retry_interval

        LOG.warning("Timed out while waiting for soft shutdown.",
                    instance=instance)
        return False

    def pause(self, instance):
        """Pause VM instance."""
        LOG.debug("Pause instance", instance=instance)
        self._set_vm_state(instance,
                           os_win_const.HYPERV_VM_STATE_PAUSED)

    def unpause(self, instance):
        """Unpause paused VM instance."""
        LOG.debug("Unpause instance", instance=instance)
        self._set_vm_state(instance,
                           os_win_const.HYPERV_VM_STATE_ENABLED)

    def suspend(self, instance):
        """Suspend the specified instance."""
        LOG.debug("Suspend instance", instance=instance)
        self._set_vm_state(instance,
                           os_win_const.HYPERV_VM_STATE_SUSPENDED)

    def resume(self, instance):
        """Resume the suspended VM instance."""
        LOG.debug("Resume instance", instance=instance)
        self._set_vm_state(instance,
                           os_win_const.HYPERV_VM_STATE_ENABLED)

    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance."""
        LOG.debug("Power off instance", instance=instance)

        # We must make sure that the console log workers are stopped,
        # otherwise we won't be able to delete or move the VM log files.
        self._serial_console_ops.stop_console_handler(instance.name)

        if retry_interval <= 0:
            retry_interval = SHUTDOWN_TIME_INCREMENT

        try:
            # Try a graceful shutdown first when a timeout is given.
            if timeout and self._soft_shutdown(instance,
                                               timeout,
                                               retry_interval):
                return

            self._set_vm_state(instance,
                               os_win_const.HYPERV_VM_STATE_DISABLED)
        except os_win_exc.HyperVVMNotFoundException:
            # The manager can call the stop API after receiving instance
            # power off events. If this is triggered when the instance
            # is being deleted, it might attempt to power off an unexisting
            # instance. We'll just pass in this case.
            LOG.debug("Instance not found. Skipping power off",
                      instance=instance)

    def power_on(self, instance, block_device_info=None, network_info=None,
                 should_plug_vifs=True):
        """Power on the specified instance."""
        LOG.debug("Power on instance", instance=instance)

        if block_device_info:
            self._volumeops.fix_instance_volume_disk_paths(instance.name,
                                                           block_device_info)
        if should_plug_vifs:
            self.plug_vifs(instance, network_info)
        self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED)

    def _set_vm_state(self, instance, req_state):
        instance_name = instance.name

        try:
            self._vmutils.set_vm_state(instance_name, req_state)
            LOG.debug("Successfully changed state of VM %(instance_name)s"
                      " to: %(req_state)s",
                      {'instance_name': instance_name,
                       'req_state': req_state})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to change vm state of %(instance_name)s"
                          " to %(req_state)s",
                          {'instance_name': instance_name,
                           'req_state': req_state})

    def _get_vm_state(self, instance_name):
        summary_info = self._vmutils.get_vm_summary_info(instance_name)
        return summary_info['EnabledState']

    def _wait_for_power_off(self, instance_name, time_limit):
        """Waiting for a VM to be in a disabled state.

        :return: True if the instance is shutdown within time_limit,
                 False otherwise.
        """
        desired_vm_states = [os_win_const.HYPERV_VM_STATE_DISABLED]

        def _check_vm_status(instance_name):
            if self._get_vm_state(instance_name) in desired_vm_states:
                raise loopingcall.LoopingCallDone()

        periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status,
                                                             instance_name)

        try:
            # add a timeout to the periodic call.
            periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT)
            etimeout.with_timeout(time_limit, periodic_call.wait)
        except etimeout.Timeout:
            # VM did not shutdown in the expected time_limit.
            return False
        finally:
            # stop the periodic call, in case of exceptions or Timeout.
            periodic_call.stop()

        return True

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """Resume guest state when a host is booted."""
        self.power_on(instance, block_device_info, network_info)

    def _create_vm_com_port_pipes(self, instance, serial_ports):
        # One named pipe per serial port, keyed by instance uuid + port type.
        for port_number, port_type in serial_ports.items():
            pipe_path = r'\\.\pipe\%s_%s' % (instance.uuid, port_type)
            self._vmutils.set_vm_serial_port_connection(
                instance.name, port_number, pipe_path)

    def copy_vm_dvd_disks(self, vm_name, dest_host):
        dvd_disk_paths = self._vmutils.get_vm_dvd_disk_paths(vm_name)
        dest_path = self._pathutils.get_instance_dir(
            vm_name, remote_server=dest_host)
        for path in dvd_disk_paths:
            self._pathutils.copyfile(path, dest_path)

    def plug_vifs(self, instance, network_info):
        if network_info:
            for vif in network_info:
                self._vif_driver.plug(instance, vif)

    def unplug_vifs(self, instance, network_info):
        if network_info:
            for vif in network_info:
                self._vif_driver.unplug(instance, vif)

    def _check_hotplug_available(self, instance):
        """Check whether attaching an interface is possible for the given
        instance.

        :returns: True if attaching / detaching interfaces is possible for
                  the given instance.
        """
        vm_state = self._get_vm_state(instance.name)
        if vm_state == os_win_const.HYPERV_VM_STATE_DISABLED:
            # can attach / detach interface to stopped VMs.
            return True

        if not self._hostutils.check_min_windows_version(10, 0):
            # TODO(claudiub): add set log level to error after string freeze.
            LOG.debug("vNIC hot plugging is supported only in newer "
                      "versions than Windows Hyper-V / Server 2012 R2.")
            return False

        if (self._vmutils.get_vm_generation(instance.name) ==
                constants.VM_GEN_1):
            # TODO(claudiub): add set log level to error after string freeze.
            LOG.debug("Cannot hot plug vNIC to a first generation VM.",
                      instance=instance)
            return False

        return True

    def attach_interface(self, instance, vif):
        if not self._check_hotplug_available(instance):
            raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)

        LOG.debug('Attaching vif: %s', vif['id'], instance=instance)
        self._vmutils.create_nic(instance.name, vif['id'], vif['address'])
        self._vif_driver.plug(instance, vif)

    def detach_interface(self, instance, vif):
        try:
            if not self._check_hotplug_available(instance):
                raise exception.InterfaceDetachFailed(
                    instance_uuid=instance.uuid)

            LOG.debug('Detaching vif: %s', vif['id'], instance=instance)
            self._vif_driver.unplug(instance, vif)
            self._vmutils.destroy_nic(instance.name, vif['id'])
        except os_win_exc.HyperVVMNotFoundException:
            # TODO(claudiub): add set log level to error after string freeze.
            LOG.debug("Instance not found during detach interface. It "
                      "might have been destroyed beforehand.",
                      instance=instance)
            raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)

    def rescue_instance(self, context, instance, network_info, image_meta,
                        rescue_password):
        try:
            self._rescue_instance(context, instance, network_info,
                                  image_meta, rescue_password)
        except Exception as exc:
            with excutils.save_and_reraise_exception():
                LOG.error("Instance rescue failed. Exception: %(exc)s. "
                          "Attempting to unrescue the instance.",
                          {'exc': exc}, instance=instance)
                self.unrescue_instance(instance)

    def _rescue_instance(self, context, instance, network_info, image_meta,
                         rescue_password):
        """Boot the instance from a rescue image, keeping the original root
        disk attached as a secondary SCSI drive.
        """
        rescue_image_id = image_meta.id or instance.image_ref
        rescue_vhd_path = self._create_root_vhd(
            context, instance, rescue_image_id=rescue_image_id)

        rescue_vm_gen = self.get_image_vm_generation(instance.uuid,
                                                     image_meta)
        vm_gen = self._vmutils.get_vm_generation(instance.name)
        if rescue_vm_gen != vm_gen:
            err_msg = _('The requested rescue image requires a different VM '
                        'generation than the actual rescued instance. '
                        'Rescue image VM generation: %(rescue_vm_gen)s. '
                        'Rescued instance VM generation: %(vm_gen)s.') % dict(
                            rescue_vm_gen=rescue_vm_gen,
                            vm_gen=vm_gen)
            raise exception.ImageUnacceptable(reason=err_msg,
                                              image_id=rescue_image_id)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        if not root_vhd_path:
            err_msg = _('Instance root disk image could not be found. '
                        'Rescuing instances booted from volume is '
                        'not supported.')
            raise exception.InstanceNotRescuable(reason=err_msg,
                                                 instance_id=instance.uuid)

        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                     is_physical=False)
        self._attach_drive(instance.name, rescue_vhd_path, 0,
                           self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._vmutils.attach_scsi_drive(instance.name, root_vhd_path,
                                        drive_type=constants.DISK)

        if configdrive.required_by(instance):
            self._detach_config_drive(instance.name)
            rescue_configdrive_path = self._create_config_drive(
                context, instance,
                injected_files=None,
                admin_password=rescue_password,
                network_info=network_info,
                rescue=True)
            self.attach_config_drive(instance, rescue_configdrive_path,
                                     vm_gen)

        self.power_on(instance)

    def unrescue_instance(self, instance):
        """Detach the rescue disk and boot from the original root disk."""
        self.power_off(instance)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
                                                               rescue=True)

        if (instance.vm_state == vm_states.RESCUED and
                not (rescue_vhd_path and root_vhd_path)):
            err_msg = _('Missing instance root and/or rescue image. '
                        'The instance cannot be unrescued.')
            raise exception.InstanceNotRescuable(reason=err_msg,
                                                 instance_id=instance.uuid)

        vm_gen = self._vmutils.get_vm_generation(instance.name)
        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                     is_physical=False)
        if rescue_vhd_path:
            self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
                                         is_physical=False)
            fileutils.delete_if_exists(rescue_vhd_path)
        self._attach_drive(instance.name, root_vhd_path, 0,
                           self._ROOT_DISK_CTRL_ADDR, controller_type)

        self._detach_config_drive(instance.name, rescue=True, delete=True)

        # Reattach the configdrive, if exists and not already attached.
        configdrive_path = self._pathutils.lookup_configdrive_path(
            instance.name)
        if configdrive_path and not self._vmutils.is_disk_attached(
                configdrive_path, is_physical=False):
            self.attach_config_drive(instance, configdrive_path, vm_gen)

        self.power_on(instance)

    def _set_instance_disk_qos_specs(self, instance):
        quota_specs = self._get_scoped_flavor_extra_specs(instance, 'quota')

        # IOPS may be given directly, or derived from a bytes/sec quota.
        disk_total_bytes_sec = int(
            quota_specs.get('disk_total_bytes_sec') or 0)
        disk_total_iops_sec = int(
            quota_specs.get('disk_total_iops_sec') or
            self._volumeops.bytes_per_sec_to_iops(disk_total_bytes_sec))

        if disk_total_iops_sec:
            local_disks = self._get_instance_local_disks(instance.name)
            for disk_path in local_disks:
                self._vmutils.set_disk_qos_specs(disk_path,
                                                 disk_total_iops_sec)

    def _get_instance_local_disks(self, instance_name):
        # A disk is "local" when it lives under the instance's directory.
        instance_path = self._pathutils.get_instance_dir(instance_name)
        instance_disks = self._vmutils.get_vm_storage_paths(instance_name)[0]
        local_disks = [disk_path for disk_path in instance_disks
                       if instance_path in disk_path]
        return local_disks

    def _get_scoped_flavor_extra_specs(self, instance, scope):
        # Return {key: value} for extra specs of the form "<scope>:<key>".
        extra_specs = instance.flavor.extra_specs or {}
        filtered_specs = {}
        for spec, value in extra_specs.items():
            if ':' in spec:
                _scope, key = spec.split(':')
                if _scope == scope:
                    filtered_specs[key] = value
        return filtered_specs
#!/usr/bin/env python
# encoding: utf-8

"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-04-25.

The Artist module loosely covers http://developer.echonest.com/docs/v4/artist.html
Refer to the official api documentation if you are unsure about something.
"""

# NOTE(review): Python 2 style module (implicit relative imports, __cmp__ /
# cmp, .encode('utf-8') on str for display) -- presumably targets Python 2.x
# only; confirm before attempting to run under Python 3.
import util
from proxies import ArtistProxy, ResultList
from song import Song

class Artist(ArtistProxy):
    """
    An Artist object

    Attributes:
        id (str): Echo Nest Artist ID

        name (str): Artist Name

        audio (list): Artist audio

        biographies (list): Artist biographies

        blogs (list): Artist blogs

        familiarity (float): Artist familiarity

        hotttnesss (float): Artist hotttnesss

        images (list): Artist images

        news (list): Artist news

        reviews (list): Artist reviews

        similar (list): Similar Artists

        songs (list): A list of song objects

        terms (list): Terms for an artist

        urls (list): Artist urls

        video (list): Artist video

        years_active (list): A list of dictionaries containing start and stop years

    You create an artist object like this:

    >>> a = artist.Artist('ARH6W4X1187B99274F')
    >>> a = artist.Artist('the national')
    >>> a = artist.Artist('musicbrainz:artist:a74b1b7f-71a5-4011-9441-d0b5e4122711')
    """
    def __init__(self, id, **kwargs):
        """
        Artist class

        Args:
            id (str): an artistw ID

        Returns:
            An artist object

        Example:

        >>> a = artist.Artist('ARH6W4X1187B99274F', buckets=['hotttnesss'])
        >>> a.hotttnesss
        0.80098515900997658
        >>>
        """
        super(Artist, self).__init__(id, **kwargs)

    def __repr__(self):
        return "<%s - %s>" % (self._object_type.encode('utf-8'), self.name.encode('utf-8'))

    def __str__(self):
        return self.name.encode('utf-8')

    def __cmp__(self, other):
        # Python 2 rich comparison: artists compare equal iff their IDs match.
        return cmp(self.id, other.id)

    def get_audio(self, results=15, start=0, cache=True):
        """Get a list of audio documents found on the web related to an artist

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An integer starting value for the result set

        Returns:
            A list of audio document dicts; list contains additional attributes 'start' and 'total'

        Example:

        >>> a = artist.Artist('alphabeat')
        >>> a.get_audio()[0]
        {u'artist': u'Alphabeat',
         u'date': u'2010-04-28T01:40:45',
         u'id': u'70be4373fa57ac2eee8c7f30b0580899',
         u'length': 210.0,
         u'link': u'http://iamthecrime.com',
         u'release': u'The Beat Is...',
         u'title': u'DJ',
         u'url': u'http://iamthecrime.com/wp-content/uploads/2010/04/03_DJ_iatc.mp3'}
        >>>
        """
        # The cache only holds the default query (results=15, start=0);
        # anything else always hits the API.
        if cache and ('audio' in self.cache) and results==15 and start==0:
            return self.cache['audio']
        else:
            response = self.get_attribute('audio', results=results, start=start)
            if results==15 and start==0:
                self.cache['audio'] = ResultList(response['audio'], 0, response['total'])
            return ResultList(response['audio'], start, response['total'])

    audio = property(get_audio)

    def get_biographies(self, results=15, start=0, license=None, cache=True):
        """Get a list of artist biographies

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An integer starting value for the result set

            license (str): A string specifying the desired license type

        Returns:
            A list of biography document dicts; list contains additional attributes 'start' and 'total'

        Example:

        >>> a = artist.Artist('britney spears')
        >>> bio = a.get_biographies(results=1)[0]
        >>> bio['url']
        u'http://www.mtvmusic.com/spears_britney'
        >>>
        """
        if cache and ('biographies' in self.cache) and results==15 and start==0 and license==None:
            return self.cache['biographies']
        else:
            response = self.get_attribute('biographies', results=results, start=start, license=license)
            if results==15 and start==0 and license==None:
                self.cache['biographies'] = ResultList(response['biographies'], 0, response['total'])
            return ResultList(response['biographies'], start, response['total'])

    biographies = property(get_biographies)

    def get_blogs(self, results=15, start=0, cache=True, high_relevance=False):
        """Get a list of blog articles related to an artist

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An ingteger starting value for the result set

        Returns:
            A list of blog document dicts; list contains additional attributes 'start' and 'total'

        Example:

        >>> a = artist.Artist('bob marley')
        >>> blogs = a.get_blogs(results=1,start=4)
        >>> blogs.total
        4068
        >>> blogs[0]['summary']
        "But the Kenyans I know relate to music about the same way Americans do. ..."
        >>>
        """
        if cache and ('blogs' in self.cache) and results==15 and start==0 and not high_relevance:
            return self.cache['blogs']
        else:
            # The API expects string booleans for high_relevance.
            high_relevance = 'true' if high_relevance else 'false'
            response = self.get_attribute('blogs', results=results, start=start, high_relevance=high_relevance)
            if results==15 and start==0:
                self.cache['blogs'] = ResultList(response['blogs'], 0, response['total'])
            return ResultList(response['blogs'], start, response['total'])

    blogs = property(get_blogs)

    def get_familiarity(self, cache=True):
        """Get our numerical estimation of how familiar an artist currently is to the world

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

        Returns:
            A float representing familiarity.

        Example:

        >>> a = artist.Artist('frank sinatra')
        >>> a.get_familiarity()
        0.65142555825947457
        >>> a.familiarity
        0.65142555825947457
        >>>
        """
        if not (cache and ('familiarity' in self.cache)):
            response = self.get_attribute('familiarity')
            self.cache['familiarity'] = response['artist']['familiarity']
        return self.cache['familiarity']

    familiarity = property(get_familiarity)

    def get_foreign_id(self, idspace='musicbrainz', cache=True):
        """Get the foreign id for this artist for a specific id space

        Args:

        Kwargs:
            idspace (str): A string indicating the idspace to fetch a foreign id for.

        Returns:
            A foreign ID string

        Example:

        >>> a = artist.Artist('fabulous')
        >>> a.get_foreign_id('7digital')
        u'7digital:artist:186042'
        >>>
        """
        if not (cache and ('foreign_ids' in self.cache) and filter(lambda d: d.get('catalog') == idspace, self.cache['foreign_ids'])):
            response = self.get_attribute('profile', bucket=['id:'+idspace])
            foreign_ids = response['artist'].get("foreign_ids", [])
            # Merge newly fetched ids with any previously cached ones.
            self.cache['foreign_ids'] = self.cache.get('foreign_ids', []) + foreign_ids
        cval = filter(lambda d: d.get('catalog') == idspace,
                      self.cache.get('foreign_ids'))
        return cval[0].get('foreign_id') if cval else None

    def get_hotttnesss(self, cache=True):
        """Get our numerical description of how hottt an artist currently is

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

        Returns:
            float: the hotttnesss value

        Example:

        >>> a = artist.Artist('hannah montana')
        >>> a.get_hotttnesss()
        0.59906022155998995
        >>> a.hotttnesss
        0.59906022155998995
        >>>
        """
        if not (cache and ('hotttnesss' in self.cache)):
            response = self.get_attribute('hotttnesss')
            self.cache['hotttnesss'] = response['artist']['hotttnesss']
        return self.cache['hotttnesss']

    hotttnesss = property(get_hotttnesss)

    def get_images(self, results=15, start=0, license=None, cache=True):
        """Get a list of artist images

        Args:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An integer starting value for the result set

            license (str): A string specifying the desired license type

        Returns:
            A list of image document dicts; list contains additional attributes 'start' and 'total'

        Example:

        >>> a = artist.Artist('Captain Beefheart')
        >>> images = a.get_images(results=1)
        >>> images.total
        49
        >>> images[0]['url']
        u'http://c4.ac-images.myspacecdn.com/images01/5/l_e1a329cdfdb16a848288edc6d578730f.jpg'
        >>>
        """
        if cache and ('images' in self.cache) and results==15 and start==0 and license==None:
            return self.cache['images']
        else:
            response = self.get_attribute('images', results=results, start=start, license=license)
            if results==15 and start==0 and license==None:
                self.cache['images'] = ResultList(response['images'], 0, response['total'])
            return ResultList(response['images'], start, response['total'])

    images = property(get_images)

    def get_news(self, results=15, start=0, cache=True, high_relevance=False):
        """Get a list of news articles found on the web related to an artist

        Args:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An integer starting value for the result set

        Returns:
            A list of news document dicts; list contains additional attributes 'start' and 'total'

        Example:

        >>> a = artist.Artist('Henry Threadgill')
        >>> news = a.news
        >>> news.total
        41
        >>> news[0]['name']
        u'Jazz Journalists Association Announces 2010 Jazz Award Winners'
        >>>
        """
        if cache and ('news' in self.cache) and results==15 and start==0 and not high_relevance:
            return self.cache['news']
        else:
            # The API expects string booleans for high_relevance.
            high_relevance = 'true' if high_relevance else 'false'
            response = self.get_attribute('news', results=results, start=start, high_relevance=high_relevance)
            if results==15 and start==0:
                self.cache['news'] = ResultList(response['news'], 0, response['total'])
            return ResultList(response['news'], start, response['total'])

    news = property(get_news)

    def get_reviews(self, results=15, start=0, cache=True):
        """Get reviews related to an artist's work

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An integer starting value for the result set

        Returns:
            A list of review document dicts; list contains additional attributes 'start' and 'total'

        Example:

        >>> a = artist.Artist('Ennio Morricone')
        >>> reviews = a.reviews
        >>> reviews.total
        17
        >>> reviews[0]['release']
        u'For A Few Dollars More'
        >>>
        """
        if cache and ('reviews' in self.cache) and results==15 and start==0:
            return self.cache['reviews']
        else:
            response = self.get_attribute('reviews', results=results, start=start)
            if results==15 and start==0:
                self.cache['reviews'] = ResultList(response['reviews'], 0, response['total'])
            return ResultList(response['reviews'], start, response['total'])

    reviews = property(get_reviews)

    def get_similar(self, results=15, start=0, buckets=None, limit=False, cache=True, max_familiarity=None, min_familiarity=None, \
                    max_hotttnesss=None, min_hotttnesss=None, min_results=None, reverse=False, artist_start_year_before=None, \
                    artist_start_year_after=None,artist_end_year_before=None,artist_end_year_after=None):
        """Return similar artists to this one

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An integer starting value for the result set

            max_familiarity (float): A float specifying the max familiarity of artists to search for

            min_familiarity (float): A float specifying the min familiarity of artists to search for

            max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for

            min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for

            reverse (bool): A boolean indicating whether or not to return dissimilar artists (wrecommender). Defaults to False.

        Returns:
            A list of similar Artist objects

        Example:

        >>> a = artist.Artist('Sleater Kinney')
        >>> similars = a.similar[:5]
        >>> similars
        [<artist - Bikini Kill>, <artist - Pretty Girls Make Graves>, <artist - Huggy Bear>, <artist - Bratmobile>, <artist - Team Dresch>]
        >>>
        """
        buckets = buckets or []
        kwargs = {}
        # Only forward explicitly requested filters to the API.
        if max_familiarity:
            kwargs['max_familiarity'] = max_familiarity
        if min_familiarity:
            kwargs['min_familiarity'] = min_familiarity
        if max_hotttnesss:
            kwargs['max_hotttnesss'] = max_hotttnesss
        if min_hotttnesss:
            kwargs['min_hotttnesss'] = min_hotttnesss
        if min_results:
            kwargs['min_results'] = min_results
        if buckets:
            kwargs['bucket'] = buckets
        if limit:
            kwargs['limit'] = 'true'
        if reverse:
            kwargs['reverse'] = 'true'
        if artist_start_year_before:
            kwargs['artist_start_year_before'] = artist_start_year_before
        if artist_start_year_after:
            kwargs['artist_start_year_after'] = artist_start_year_after
        if artist_end_year_before:
            kwargs['artist_end_year_before'] = artist_end_year_before
        if artist_end_year_after:
            kwargs['artist_end_year_after'] = artist_end_year_after

        # Only the unfiltered default query is served from / stored in cache.
        if cache and ('similar' in self.cache) and results==15 and start==0 and (not kwargs):
            return [Artist(**util.fix(a)) for a in self.cache['similar']]
        else:
            response = self.get_attribute('similar', results=results, start=start, **kwargs)
            if results==15 and start==0 and (not kwargs):
                self.cache['similar'] = response['artists']
            return [Artist(**util.fix(a)) for a in response['artists']]

    similar = property(get_similar)

    def get_songs(self, cache=True, results=15, start=0):
        """Get the songs associated with an artist

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An integer starting value for the result set

        Results:
            A list of Song objects; list contains additional attributes 'start' and 'total'

        Example:

        >>> a = artist.Artist('Strokes')
        >>> a.get_songs(results=5)
        [<song - Fear Of Sleep>, <song - Red Light>, <song - Ize Of The World>, <song - Evening Sun>, <song - Juicebox>]
        >>>
        """
        if cache and ('songs' in self.cache) and results==15 and start==0:
            # Lazily upgrade cached raw dicts into Song objects.
            # NOTE(review): this indexes self.cache['songs'][0] -- looks like
            # it would raise IndexError for an artist with zero songs; verify.
            if not isinstance(self.cache['songs'][0], Song):
                song_objects = []
                for s in self.cache["songs"]:
                    song_objects.append(Song(id=s['id'], title=s['title'],
                                             artist_name=self.name, artist_id=self.id))
                self.cache['songs'] = song_objects
            return self.cache['songs']
        else:
            response = self.get_attribute('songs', results=results, start=start)
            for s in response['songs']:
                s.update({'artist_id':self.id, 'artist_name':self.name})
            songs = [Song(**util.fix(s)) for s in response['songs']]
            if results==15 and start==0:
                self.cache['songs'] = ResultList(songs, 0, response['total'])
            return ResultList(songs, start, response['total'])

    songs = property(get_songs)

    def get_terms(self, sort='weight', cache=True):
        """Get the terms associated with an artist

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            sort (str): A string specifying the desired sorting type (weight or frequency)

        Results:
            A list of term document dicts

        Example:

        >>> a = artist.Artist('tom petty')
        >>> a.terms
        [{u'frequency': 1.0, u'name': u'heartland rock', u'weight': 1.0},
         {u'frequency': 0.88569401860168606, u'name': u'jam band', u'weight': 0.9116501862732439},
         ...]
        >>>
        """
        # Only the default sort order is cached.
        if cache and ('terms' in self.cache) and sort=='weight':
            return self.cache['terms']
        else:
            response = self.get_attribute('terms', sort=sort)
            if sort=='weight':
                self.cache['terms'] = response['terms']
            return response['terms']

    terms = property(get_terms)

    def get_urls(self, cache=True):
        """Get the urls for an artist

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

        Results:
            A url document dict

        Example:

        >>> a = artist.Artist('the unicorns')
        >>> a.get_urls()
        {u'amazon_url': u'http://www.amazon.com/gp/search?ie=UTF8&keywords=The Unicorns&tag=httpechonecom-20&index=music',
         u'aolmusic_url': u'http://music.aol.com/artist/the-unicorns',
         u'itunes_url': u'http://itunes.com/TheUnicorns',
         u'lastfm_url': u'http://www.last.fm/music/The+Unicorns',
         u'mb_url': u'http://musicbrainz.org/artist/603c5f9f-492a-4f21-9d6f-1642a5dbea2d.html',
         u'myspace_url': u'http://www.myspace.com/iwasbornunicorn'}
        >>>
        """
        if not (cache and ('urls' in self.cache)):
            response = self.get_attribute('urls')
            self.cache['urls'] = response['urls']
        return self.cache['urls']

    urls = property(get_urls)

    def get_video(self, results=15, start=0, cache=True):
        """Get a list of video documents found on the web related to an artist

        Args:

        Kwargs:
            cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.

            results (int): An integer number of results to return

            start (int): An integer starting value for the result set

        Returns:
            A list of video document dicts; list contains additional attributes 'start' and 'total'

        Example:

        >>> a = artist.Artist('the vapors')
        >>> a.get_video(results=1, start=2)
        [{u'date_found': u'2009-12-28T08:27:48',
          u'id': u'd02f9e6dc7904f70402d4676516286b9',
          u'image_url': u'http://i1.ytimg.com/vi/p6c0wOFL3Us/default.jpg',
          u'site': u'youtube',
          u'title': u'The Vapors-Turning Japanese (rectangular white vinyl promo)',
          u'url': u'http://youtube.com/watch?v=p6c0wOFL3Us'}]
        >>>
        """
        if cache and ('video' in self.cache) and results==15 and start==0:
            return self.cache['video']
        else:
            response = self.get_attribute('video', results=results, start=start)
            if results==15 and start==0:
                self.cache['video'] = ResultList(response['video'], 0, response['total'])
            return ResultList(response['video'], start, response['total'])

    video = property(get_video)

    def get_years_active(self, cache=True):
        """Get a list of years active dictionaries
for an artist Args: Kwargs: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. Returns: A list of years active dictionaries; list contains additional attributes 'start' and 'total' Example: >>> a = artist.Artist('radiohead') >>> a.get_years_active() [{ start: 1985 }] >>> """ if cache and ('years_active' in self.cache): return self.cache['years_active'] else: response = self.get_attribute('profile', bucket=['years_active']) self.cache['years_active'] = response['artist']['years_active'] return response['artist']['years_active'] years_active = property(get_years_active) def get_doc_counts(self, cache=True): """ Get the number of related documents of various types for the artist. The types include audio, biographies, blogs, images, news, reviews, songs, videos. Note that these documents can be retrieved by calling artist.<document type>, for example, artist.biographies. Args: Kwargs: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. Returns: A dictionary with one key for each document type, mapped to an integer count of documents. 
Example: >>> a = artist.Artist("The Kinks") >>> a.get_doc_counts() {u'audio': 194, u'biographies': 9, u'blogs': 379, u'images': 177, u'news': 84, u'reviews': 110, u'songs': 499, u'videos': 340} >>> """ if not cache or not ('doc_counts' in self.cache): response = self.get_attribute("profile", bucket='doc_counts') self.cache['doc_counts'] = response['artist']['doc_counts'] return self.cache['doc_counts'] doc_counts = property(get_doc_counts) def search(name=None, description=None, style=None, mood=None, start=0, \ results=15, buckets=None, limit=False, \ fuzzy_match=False, sort=None, max_familiarity=None, min_familiarity=None, \ max_hotttnesss=None, min_hotttnesss=None, test_new_things=None, rank_type=None, \ artist_start_year_after=None, artist_start_year_before=None,artist_end_year_after=None,artist_end_year_before=None): """Search for artists by name, description, or constraint. Args: Kwargs: name (str): the name of an artist description (str): A string describing the artist style (str): A string describing the style/genre of the artist mood (str): A string describing the mood of the artist start (int): An integer starting value for the result set results (int): An integer number of results to return buckets (list): A list of strings specifying which buckets to retrieve limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets fuzzy_match (bool): A boolean indicating whether or not to search for similar sounding matches (only works with name) max_familiarity (float): A float specifying the max familiarity of artists to search for min_familiarity (float): A float specifying the min familiarity of artists to search for max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for artist_start_year_before (int): Returned artists will have started recording music before this year. 
artist_start_year_after (int): Returned artists will have started recording music after this year. artist_end_year_before (int): Returned artists will have stopped recording music before this year. artist_end_year_after (int): Returned artists will have stopped recording music after this year. rank_type (str): A string denoting the desired ranking for description searches, either 'relevance' or 'familiarity' Returns: A list of Artist objects Example: >>> results = artist.search(name='t-pain') >>> results [<artist - T-Pain>, <artist - T-Pain & Lil Wayne>, <artist - T Pain & 2 Pistols>, <artist - Roscoe Dash & T-Pain>, <artist - Tony Moxberg & T-Pain>, <artist - Flo-Rida (feat. T-Pain)>, <artist - Shortyo/Too Short/T-Pain>] >>> """ limit = str(limit).lower() fuzzy_match = str(fuzzy_match).lower() kwargs = locals() kwargs['bucket'] = buckets or [] del kwargs['buckets'] """Search for artists""" result = util.callm("%s/%s" % ('artist', 'search'), kwargs) return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']] def top_hottt(start=0, results=15, buckets = None, limit=False): """Get the top hotttest artists, according to The Echo Nest Args: Kwargs: results (int): An integer number of results to return start (int): An integer starting value for the result set buckets (list): A list of strings specifying which buckets to retrieve limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets Returns: A list of hottt Artist objects Example: >>> hot_stuff = artist.top_hottt() >>> hot_stuff [<artist - Deerhunter>, <artist - Sufjan Stevens>, <artist - Belle and Sebastian>, <artist - Glee Cast>, <artist - Linkin Park>, <artist - Neil Young>, <artist - Jimmy Eat World>, <artist - Kanye West>, <artist - Katy Perry>, <artist - Bruno Mars>, <artist - Lady Gaga>, <artist - Rihanna>, <artist - Lil Wayne>, <artist - Jason Mraz>, <artist - Green Day>] >>> """ buckets = buckets or [] kwargs = {} if start: 
kwargs['start'] = start if results: kwargs['results'] = results if buckets: kwargs['bucket'] = buckets if limit: kwargs['limit'] = 'true' """Get top hottt artists""" result = util.callm("%s/%s" % ('artist', 'top_hottt'), kwargs) return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']] def top_terms(results=15): """Get a list of the top overall terms Args: Kwargs: results (int): An integer number of results to return Returns: A list of term document dicts Example: >>> terms = artist.top_terms(results=5) >>> terms [{u'frequency': 1.0, u'name': u'rock'}, {u'frequency': 0.99054710039307992, u'name': u'electronic'}, {u'frequency': 0.96131624654034398, u'name': u'hip hop'}, {u'frequency': 0.94358477322411127, u'name': u'jazz'}, {u'frequency': 0.94023302416455468, u'name': u'pop rock'}] >>> """ kwargs = {} if results: kwargs['results'] = results """Get top terms""" result = util.callm("%s/%s" % ('artist', 'top_terms'), kwargs) return result['response']['terms'] def list_terms(type): """Get a list of best terms to use with search Args: Kwargs: type (str): the type of term to return, either 'mood' or 'style' Example: >>> best_terms = artist.list_terms('mood') >>> best_terms [{u'name': u'aggressive'}, {u'name': u'ambient'}, {u'name': u'angry'}, {u'name': u'angst-ridden'}, {u'name': u'bouncy'}, {u'name': u'calming'}, {u'name': u'carefree'}, etc.] 
""" kwargs = {'type': type} result = util.callm("%s/%s" % ('artist', 'list_terms'), kwargs) return result['response']['terms'] def similar(names=None, ids=None, start=0, results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None, max_hotttnesss=None, min_hotttnesss=None, seed_catalog=None,artist_start_year_before=None, \ artist_start_year_after=None,artist_end_year_before=None,artist_end_year_after=None): """Return similar artists to this one Args: Kwargs: ids (str/list): An artist id or list of ids names (str/list): An artist name or list of names results (int): An integer number of results to return buckets (list): A list of strings specifying which buckets to retrieve limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets start (int): An integer starting value for the result set max_familiarity (float): A float specifying the max familiarity of artists to search for min_familiarity (float): A float specifying the min familiarity of artists to search for max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for seed_catalog (str): A string specifying the catalog similar artists are restricted to Returns: A list of similar Artist objects Example: >>> some_dudes = [artist.Artist('weezer'), artist.Artist('radiohead')] >>> some_dudes [<artist - Weezer>, <artist - Radiohead>] >>> sims = artist.similar(ids=[art.id for art in some_dudes], results=5) >>> sims [<artist - The Smashing Pumpkins>, <artist - Biffy Clyro>, <artist - Death Cab for Cutie>, <artist - Jimmy Eat World>, <artist - Nerf Herder>] >>> """ buckets = buckets or [] kwargs = {} if ids: if not isinstance(ids, list): ids = [ids] kwargs['id'] = ids if names: if not isinstance(names, list): names = [names] kwargs['name'] = names if max_familiarity is not None: kwargs['max_familiarity'] = max_familiarity if 
min_familiarity is not None: kwargs['min_familiarity'] = min_familiarity if max_hotttnesss is not None: kwargs['max_hotttnesss'] = max_hotttnesss if min_hotttnesss is not None: kwargs['min_hotttnesss'] = min_hotttnesss if seed_catalog is not None: kwargs['seed_catalog'] = seed_catalog if start: kwargs['start'] = start if results: kwargs['results'] = results if buckets: kwargs['bucket'] = buckets if limit: kwargs['limit'] = 'true' if artist_start_year_before: kwargs['artist_start_year_before'] = artist_start_year_before if artist_start_year_after: kwargs['artist_start_year_after'] = artist_start_year_after if artist_end_year_before: kwargs['artist_end_year_before'] = artist_end_year_before if artist_end_year_after: kwargs['artist_end_year_after'] = artist_end_year_after result = util.callm("%s/%s" % ('artist', 'similar'), kwargs) return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']] def extract(text='', start=0, results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None, max_hotttnesss=None, min_hotttnesss=None): """Extract artist names from a block of text. 
Args: Kwargs: text (str): The text to extract artists from start (int): An integer starting value for the result set results (int): An integer number of results to return buckets (list): A list of strings specifying which buckets to retrieve limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets max_familiarity (float): A float specifying the max familiarity of artists to search for min_familiarity (float): A float specifying the min familiarity of artists to search for max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for Returns: A list of Artist objects Example: >>> results = artist.extract(text='i saw beyonce at burger king, she was eatin, she was eatin') >>> results >>> """ buckets = buckets or [] kwargs = {} kwargs['text'] = text if max_familiarity is not None: kwargs['max_familiarity'] = max_familiarity if min_familiarity is not None: kwargs['min_familiarity'] = min_familiarity if max_hotttnesss is not None: kwargs['max_hotttnesss'] = max_hotttnesss if min_hotttnesss is not None: kwargs['min_hotttnesss'] = min_hotttnesss if start: kwargs['start'] = start if results: kwargs['results'] = results if buckets: kwargs['bucket'] = buckets if limit: kwargs['limit'] = 'true' result = util.callm("%s/%s" % ('artist', 'extract'), kwargs) return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import json
import unittest
from unittest import mock

from google.api_core.gapic_v1.method import DEFAULT
from google.cloud.monitoring_v3 import AlertPolicy, NotificationChannel

from airflow.providers.google.cloud.operators.stackdriver import (
    StackdriverDeleteAlertOperator,
    StackdriverDeleteNotificationChannelOperator,
    StackdriverDisableAlertPoliciesOperator,
    StackdriverDisableNotificationChannelsOperator,
    StackdriverEnableAlertPoliciesOperator,
    StackdriverEnableNotificationChannelsOperator,
    StackdriverListAlertPoliciesOperator,
    StackdriverListNotificationChannelsOperator,
    StackdriverUpsertAlertOperator,
    StackdriverUpsertNotificationChannelOperator,
)

TEST_TASK_ID = 'test-stackdriver-operator'
TEST_FILTER = 'filter'
TEST_ALERT_POLICY_1 = {
    "combiner": "OR",
    "name": "projects/sd-project/alertPolicies/12345",
    "enabled": True,
    "display_name": "test display",
    "conditions": [
        {
            "condition_threshold": {
                "comparison": "COMPARISON_GT",
                # Fixed fixture typo "alignment_eriod" -> "alignment_period" so this
                # policy matches TEST_ALERT_POLICY_2 and the AlertPolicy aggregation
                # schema (Aggregation.alignment_period).
                "aggregations": [{"alignment_period": {'seconds': 60}, "per_series_aligner": "ALIGN_RATE"}],
            },
            "display_name": "Condition display",
            "name": "projects/sd-project/alertPolicies/123/conditions/456",
        }
    ],
}
TEST_ALERT_POLICY_2 = {
    "combiner": "OR",
    "name": "projects/sd-project/alertPolicies/6789",
    "enabled": False,
    "display_name": "test display",
    "conditions": [
        {
            "condition_threshold": {
                "comparison": "COMPARISON_GT",
                "aggregations": [{"alignment_period": {'seconds': 60}, "per_series_aligner": "ALIGN_RATE"}],
            },
            "display_name": "Condition display",
            "name": "projects/sd-project/alertPolicies/456/conditions/789",
        }
    ],
}
TEST_NOTIFICATION_CHANNEL_1 = {
    "displayName": "sd",
    "enabled": True,
    "labels": {"auth_token": "top-secret", "channel_name": "#channel"},
    "name": "projects/sd-project/notificationChannels/12345",
    "type": "slack",
}
TEST_NOTIFICATION_CHANNEL_2 = {
    "displayName": "sd",
    "enabled": False,
    "labels": {"auth_token": "top-secret", "channel_name": "#channel"},
    "name": "projects/sd-project/notificationChannels/6789",
    "type": "slack",
}


class TestStackdriverListAlertPoliciesOperator(unittest.TestCase):
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        # The hook is mocked, so the operator receives a protobuf AlertPolicy
        # and serializes it to a plain dict for the task result.
        operator = StackdriverListAlertPoliciesOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        mock_hook.return_value.list_alert_policies.return_value = [AlertPolicy(name="test-name")]
        result = operator.execute(None)
        mock_hook.return_value.list_alert_policies.assert_called_once_with(
            project_id=None,
            filter_=TEST_FILTER,
            format_=None,
            order_by=None,
            page_size=None,
            retry=DEFAULT,
            timeout=None,
            metadata=(),
        )
        assert [
            {
                'combiner': 0,
                'conditions': [],
                'display_name': '',
                'name': 'test-name',
                'notification_channels': [],
                'user_labels': {},
            }
        ] == result


class TestStackdriverEnableAlertPoliciesOperator(unittest.TestCase):
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverEnableAlertPoliciesOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        operator.execute(None)
        mock_hook.return_value.enable_alert_policies.assert_called_once_with(
            project_id=None, filter_=TEST_FILTER, retry=DEFAULT, timeout=None, metadata=()
        )
class TestStackdriverDisableAlertPoliciesOperator(unittest.TestCase):
    # Verifies the operator delegates to StackdriverHook.disable_alert_policies
    # with pass-through defaults.
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverDisableAlertPoliciesOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        operator.execute(None)
        mock_hook.return_value.disable_alert_policies.assert_called_once_with(
            project_id=None, filter_=TEST_FILTER, retry=DEFAULT, timeout=None, metadata=()
        )


class TestStackdriverUpsertAlertsOperator(unittest.TestCase):
    # Verifies the JSON alert payload is forwarded unchanged to the hook.
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverUpsertAlertOperator(
            task_id=TEST_TASK_ID, alerts=json.dumps({"policies": [TEST_ALERT_POLICY_1, TEST_ALERT_POLICY_2]})
        )
        operator.execute(None)
        mock_hook.return_value.upsert_alert.assert_called_once_with(
            alerts=json.dumps({"policies": [TEST_ALERT_POLICY_1, TEST_ALERT_POLICY_2]}),
            project_id=None,
            retry=DEFAULT,
            timeout=None,
            metadata=(),
        )


class TestStackdriverDeleteAlertOperator(unittest.TestCase):
    # Verifies the operator deletes the named alert policy via the hook.
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverDeleteAlertOperator(
            task_id=TEST_TASK_ID,
            name='test-alert',
        )
        operator.execute(None)
        mock_hook.return_value.delete_alert_policy.assert_called_once_with(
            name='test-alert', retry=DEFAULT, timeout=None, metadata=()
        )


class TestStackdriverListNotificationChannelsOperator(unittest.TestCase):
    # Verifies the protobuf NotificationChannel returned by the (mocked) hook
    # is serialized to a plain dict.
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverListNotificationChannelsOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        mock_hook.return_value.list_notification_channels.return_value = [
            NotificationChannel(name="test-123")
        ]
        result = operator.execute(None)
        mock_hook.return_value.list_notification_channels.assert_called_once_with(
            project_id=None,
            filter_=TEST_FILTER,
            format_=None,
            order_by=None,
            page_size=None,
            retry=DEFAULT,
            timeout=None,
            metadata=(),
        )
        # Depending on the version of google-apitools installed we might receive the response either with or
        # without mutation_records.
        assert result in [
            [
                {
                    'description': '',
                    'display_name': '',
                    'labels': {},
                    'name': 'test-123',
                    'type_': '',
                    'user_labels': {},
                    'verification_status': 0,
                }
            ],
            [
                {
                    'description': '',
                    'display_name': '',
                    'labels': {},
                    'mutation_records': [],
                    'name': 'test-123',
                    'type_': '',
                    'user_labels': {},
                    'verification_status': 0,
                }
            ],
        ]


class TestStackdriverEnableNotificationChannelsOperator(unittest.TestCase):
    # Verifies delegation to StackdriverHook.enable_notification_channels.
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverEnableNotificationChannelsOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        operator.execute(None)
        mock_hook.return_value.enable_notification_channels.assert_called_once_with(
            project_id=None, filter_=TEST_FILTER, retry=DEFAULT, timeout=None, metadata=()
        )


class TestStackdriverDisableNotificationChannelsOperator(unittest.TestCase):
    # Verifies delegation to StackdriverHook.disable_notification_channels.
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverDisableNotificationChannelsOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        operator.execute(None)
        mock_hook.return_value.disable_notification_channels.assert_called_once_with(
            project_id=None, filter_=TEST_FILTER, retry=DEFAULT, timeout=None, metadata=()
        )


class TestStackdriverUpsertChannelOperator(unittest.TestCase):
    # Verifies the JSON channel payload is forwarded unchanged to the hook.
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverUpsertNotificationChannelOperator(
            task_id=TEST_TASK_ID,
            channels=json.dumps({"channels": [TEST_NOTIFICATION_CHANNEL_1, TEST_NOTIFICATION_CHANNEL_2]}),
        )
        operator.execute(None)
        mock_hook.return_value.upsert_channel.assert_called_once_with(
            channels=json.dumps({"channels": [TEST_NOTIFICATION_CHANNEL_1, TEST_NOTIFICATION_CHANNEL_2]}),
            project_id=None,
            retry=DEFAULT,
            timeout=None,
            metadata=(),
        )


class TestStackdriverDeleteNotificationChannelOperator(unittest.TestCase):
    # Verifies the operator deletes the named notification channel via the hook.
    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        operator = StackdriverDeleteNotificationChannelOperator(
            task_id=TEST_TASK_ID,
            name='test-channel',
        )
        operator.execute(None)
        mock_hook.return_value.delete_notification_channel.assert_called_once_with(
            name='test-channel', retry=DEFAULT, timeout=None, metadata=()
        )
import tensorflow as tf

from luminoth.datasets.base_dataset import BaseDataset
from luminoth.utils.image import (
    resize_image_fixed, resize_image, flip_image, random_patch,
    random_resize, random_distortion, expand
)

# Maps the augmentation name used in the YAML config to its implementation.
DATA_AUGMENTATION_STRATEGIES = {
    'flip': flip_image,
    'patch': random_patch,
    'resize': random_resize,
    'distortion': random_distortion,
    'expand': expand
}


class ObjectDetectionDataset(BaseDataset):
    """Abstract object detector dataset module.

    This module implements some of the basic functionalities every object
    detector dataset usually needs.

    Object detection datasets are datasets that have ground-truth information
    consisting of rectangular bounding boxes.

    Attributes:
        dataset_dir (str): Base directory of the dataset.
        num_epochs (int): Number of epochs the dataset should iterate over.
        batch_size (int): Batch size the module should return.
        split (str): Split to consume the data from (usually "train", "val" or
            "test").
        image_min_size (int): Image minimum size, used for resizing images if
            needed.
        image_max_size (int): Image maximum size.
        random_shuffle (bool): To consume the dataset using random shuffle or
            to just use a regular FIFO queue.
    """

    # Per-example scalar features stored in the TFRecord context.
    CONTEXT_FEATURES = {
        'image_raw': tf.FixedLenFeature([], tf.string),
        'filename': tf.FixedLenFeature([], tf.string),
        'width': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'depth': tf.FixedLenFeature([], tf.int64),
    }

    # Variable-length per-box features (one entry per ground-truth box).
    SEQUENCE_FEATURES = {
        'label': tf.VarLenFeature(tf.int64),
        'xmin': tf.VarLenFeature(tf.int64),
        'xmax': tf.VarLenFeature(tf.int64),
        'ymin': tf.VarLenFeature(tf.int64),
        'ymax': tf.VarLenFeature(tf.int64),
    }

    def __init__(self, config, name='object_detection_dataset', **kwargs):
        """
        Save general purpose attributes for Dataset module.

        Args:
            config: Config object with all the session properties.
        """
        super(ObjectDetectionDataset, self).__init__(config, **kwargs)
        self._image_min_size = config.dataset.image_preprocessing.get(
            'min_size')
        self._image_max_size = config.dataset.image_preprocessing.get(
            'max_size')
        # In case no keys are defined, default to empty list.
        self._data_augmentation = config.dataset.data_augmentation or []
        # NOTE(review): `_resize_image` below reads `self._fixed_resize`,
        # `self._image_fixed_height` and `self._image_fixed_width`, which are
        # not set here — presumably set elsewhere (e.g. BaseDataset or a
        # fuller version of this __init__). TODO confirm.

    def preprocess(self, image, bboxes=None):
        """Apply transformations to image and bboxes (if available).

        Transformations are applied according to the config values.
        Augmentation runs first, then resizing; both adjust the bboxes to
        stay consistent with the transformed image.
        """
        # Resize images (if needed)
        image, bboxes, applied_augmentations = self._augment(image, bboxes)
        image, bboxes, scale_factor = self._resize_image(image, bboxes)
        return image, bboxes, {
            'scale_factor': scale_factor,
            'applied_augmentations': applied_augmentations,
        }

    def read_record(self, record):
        """Parse record TFRecord into a set a set of values, names and types
        that can be queued and then read.

        Returns:
            - queue_values: Dict with tensor values.
            - queue_names: Names for each tensor.
            - queue_types: Types for each tensor.
        """
        # We parse variable length features (bboxes in a image) as sequence
        # features
        context_example, sequence_example = tf.parse_single_sequence_example(
            record,
            context_features=self.CONTEXT_FEATURES,
            sequence_features=self.SEQUENCE_FEATURES
        )

        # Decode image
        image_raw = tf.image.decode_image(
            context_example['image_raw'], channels=3
        )
        image = tf.cast(image_raw, tf.float32)

        height = tf.cast(context_example['height'], tf.int32)
        width = tf.cast(context_example['width'], tf.int32)
        # decode_image returns an unknown static shape; pin it to (H, W, 3)
        # using the dimensions stored in the record.
        image_shape = tf.stack([height, width, 3])
        image = tf.reshape(image, image_shape)

        label = self._sparse_to_tensor(sequence_example['label'])
        xmin = self._sparse_to_tensor(sequence_example['xmin'])
        xmax = self._sparse_to_tensor(sequence_example['xmax'])
        ymin = self._sparse_to_tensor(sequence_example['ymin'])
        ymax = self._sparse_to_tensor(sequence_example['ymax'])

        # Stack parsed tensors to define bounding boxes of shape (num_boxes, 5)
        bboxes = tf.stack([xmin, ymin, xmax, ymax, label], axis=1)

        image, bboxes, preprocessing_details = self.preprocess(image, bboxes)

        filename = tf.cast(context_example['filename'], tf.string)

        # TODO: Send additional metadata through the queue (scale_factor,
        # applied_augmentations)

        queue_dtypes = [tf.float32, tf.int32, tf.string, tf.float32]
        queue_names = ['image', 'bboxes', 'filename', 'scale_factor']
        queue_values = {
            'image': image,
            'bboxes': bboxes,
            'filename': filename,
            'scale_factor': preprocessing_details['scale_factor'],
        }

        return queue_values, queue_dtypes, queue_names

    def _augment(self, image, bboxes=None, default_prob=0.5):
        """Applies different data augmentation techniques.

        Uses the list of data augmentation configurations, each data
        augmentation technique has a probability assigned to it (or just uses
        the default value for the dataset).

        Procedures are applied sequentially on top of each other according to
        the order defined in the config.

        TODO: We need a better way to ensure order using YAML config without
        ending up with a list of single-key dictionaries.

        Args:
            image: A Tensor of shape (height, width, 3).
            bboxes: A Tensor of shape (total_bboxes, 5).

        Returns:
            image: A Tensor of shape (height, width, 3).
            bboxes: A Tensor of shape (total_bboxes, 5) of type tf.int32.
        """
        applied_data_augmentation = []
        for aug_config in self._data_augmentation:
            # Each entry must be a single-key dict: {strategy_name: config}.
            if len(aug_config.keys()) != 1:
                raise ValueError(
                    'Invalid data_augmentation definition: "{}"'.format(
                        aug_config))

            aug_type = list(aug_config.keys())[0]
            if aug_type not in DATA_AUGMENTATION_STRATEGIES:
                tf.logging.warning(
                    'Invalid data augmentation strategy "{}". Ignoring'.format(
                        aug_type))
                continue

            aug_config = aug_config[aug_type]
            aug_fn = DATA_AUGMENTATION_STRATEGIES[aug_type]

            # Each strategy is applied with probability `prob` via tf.cond;
            # the random draw happens at graph-execution time, per example.
            # NOTE: `pop` mutates the config dict, so 'prob' is removed before
            # the remaining keys are forwarded to the augmentation function.
            random_number = tf.random_uniform([], seed=self._seed)
            prob = tf.to_float(aug_config.pop('prob', default_prob))
            apply_aug_strategy = tf.less(random_number, prob)

            augmented = aug_fn(image, bboxes, **aug_config)

            image = tf.cond(
                apply_aug_strategy,
                lambda: augmented['image'],
                lambda: image
            )

            if bboxes is not None:
                bboxes = tf.cond(
                    apply_aug_strategy,
                    lambda: augmented.get('bboxes'),
                    lambda: bboxes
                )

            applied_data_augmentation.append({aug_type: apply_aug_strategy})

        return image, bboxes, applied_data_augmentation

    def _resize_image(self, image, bboxes=None):
        """
        We need to resize image and bounding boxes when the biggest side
        dimension is bigger than `self._image_max_size` or when the smaller
        side is smaller than `self._image_min_size`.

        Then, using the ratio we used, we need to properly scale the bounding
        boxes.

        Args:
            image: Tensor with image of shape (H, W, 3).
            bboxes: Tensor with bounding boxes with shape (num_bboxes, 5).
                where we have (x_min, y_min, x_max, y_max, label) for each one.

        Returns:
            image: Tensor with scaled image.
            bboxes: Tensor with scaled (using the same factor as the image)
                bounding boxes with shape (num_bboxes, 5).
            scale_factor: Scale factor used to modify the image (1.0 means no
                change).
        """
        # NOTE(review): `_fixed_resize` is not assigned in the visible
        # __init__ — TODO confirm where it is set.
        if self._fixed_resize:
            resized = resize_image_fixed(
                image,
                self._image_fixed_height,
                self._image_fixed_width,
                bboxes=bboxes
            )
        else:
            resized = resize_image(
                image, bboxes=bboxes,
                min_size=self._image_min_size,
                max_size=self._image_max_size
            )
        return resized['image'], resized.get('bboxes'), resized['scale_factor']

    def _sparse_to_tensor(self, sparse_tensor, dtype=tf.int32, axis=[1]):
        # Densify a VarLenFeature and drop the singleton sequence dimension.
        # (`axis=[1]` as a mutable default is safe here: it is never mutated.)
        return tf.squeeze(
            tf.cast(tf.sparse_tensor_to_dense(sparse_tensor), dtype), axis=axis
        )
from unittest import mock

from django.utils import timezone
from freezegun import freeze_time
from rest_framework import test

from waldur_mastermind.common.utils import parse_datetime
from waldur_mastermind.invoices import models as invoices_models
from waldur_mastermind.marketplace import callbacks
from waldur_mastermind.marketplace import models as marketplace_models
from waldur_mastermind.marketplace.signals import resource_limit_update_succeeded
from waldur_mastermind.marketplace.tests import factories as marketplace_factories
from waldur_mastermind.marketplace_openstack import (
    CORES_TYPE,
    RAM_TYPE,
    STORAGE_MODE_DYNAMIC,
    STORAGE_MODE_FIXED,
    STORAGE_TYPE,
)
from waldur_openstack.openstack_base.tests.fixtures import OpenStackFixture

from .. import TENANT_TYPE


@freeze_time('2019-09-10')
class BaseTenantInvoiceTest(test.APITransactionTestCase):
    """Shared fixtures and helpers for tenant invoicing tests.

    Creates an offering of TENANT_TYPE with LIMIT-billed RAM, cores and
    storage components, plus default limits/prices used by subclasses.
    """

    def setUp(self):
        self.offering = marketplace_factories.OfferingFactory(type=TENANT_TYPE)
        # Limits are expressed in MB for RAM/storage, plain count for cores.
        self.limits = {
            RAM_TYPE: 1 * 1024,
            CORES_TYPE: 2,
            STORAGE_TYPE: 3 * 1024,
        }
        self.prices = {
            RAM_TYPE: 10,
            CORES_TYPE: 100,
            STORAGE_TYPE: 1,
        }
        for ct in [RAM_TYPE, CORES_TYPE, STORAGE_TYPE]:
            marketplace_factories.OfferingComponentFactory(
                offering=self.offering,
                type=ct,
                billing_type=marketplace_models.OfferingComponent.BillingTypes.LIMIT,
            )

    def create_plan(self, prices, unit=marketplace_models.Plan.Units.PER_DAY):
        """Create a plan with one PlanComponent per entry of ``prices``."""
        plan = marketplace_factories.PlanFactory(offering=self.offering, unit=unit)
        for ct in prices.keys():
            marketplace_factories.PlanComponentFactory(
                plan=plan,
                component=self.offering.components.get(type=ct),
                price=prices[ct],
            )
        return plan

    def create_resource(
        self, prices, limits, unit=marketplace_models.Plan.Units.PER_DAY
    ) -> marketplace_models.Resource:
        """Create a resource on a fresh plan and mark its creation succeeded."""
        plan = self.create_plan(prices, unit)
        resource = marketplace_factories.ResourceFactory(
            offering=self.offering,
            plan=plan,
            limits=limits,
            state=marketplace_models.Resource.States.CREATING,
        )
        callbacks.resource_creation_succeeded(resource)
        return resource

    def update_resource_limits(self, resource, new_limits):
        """Emit the signal that reports a successful limits-update order item."""
        order = marketplace_factories.OrderFactory(
            project=resource.project,
            state=marketplace_models.Order.States.EXECUTING,
        )
        order_item = marketplace_factories.OrderItemFactory(
            order=order,
            offering=self.offering,
            resource=resource,
            type=marketplace_models.OrderItem.Types.UPDATE,
            state=marketplace_models.OrderItem.States.EXECUTING,
            limits=new_limits,
        )
        resource_limit_update_succeeded.send(
            sender=resource.__class__, order_item=order_item
        )

    def delete_resource(self, resource):
        callbacks.resource_deletion_succeeded(resource)


class TenantInvoiceTest(BaseTenantInvoiceTest):
    """Invoice items should track resource creation, update and deletion."""

    def test_when_resource_is_created_invoice_is_updated(self):
        resource = self.create_resource(self.prices, self.limits)
        invoice_items = invoices_models.InvoiceItem.objects.filter(resource=resource)
        # One invoice item per LIMIT-billed component (RAM, cores, storage).
        self.assertEqual(invoice_items.count(), 3)

    def test_when_resource_limits_are_updated_invoice_items_are_updated(self):
        new_limits = {
            RAM_TYPE: 10 * 1024,
            CORES_TYPE: 20,
            STORAGE_TYPE: 30 * 1024,
        }
        with freeze_time('2017-01-01'):
            resource = self.create_resource(self.prices, self.limits)
        with freeze_time('2017-01-10'):
            self.update_resource_limits(resource, new_limits)
            invoice_items = invoices_models.InvoiceItem.objects.filter(
                resource=resource
            )
            self.assertEqual(invoice_items.count(), 3)

    def test_when_resource_is_deleted_invoice_is_updated(self):
        resource = self.create_resource(self.prices, self.limits)
        with freeze_time('2019-09-18'):
            resource.set_state_terminating()
            resource.save()
            self.delete_resource(resource)
            invoice_item = invoices_models.InvoiceItem.objects.filter(
                resource=resource
            ).last()
            # Billing must stop at the (frozen) deletion date.
            self.assertEqual(invoice_item.end.day, 18)

    def test_resource_limit_period_is_updated_when_resource_is_terminated(self):
        resource = self.create_resource(self.prices, self.limits)
        with freeze_time('2019-09-18'):
            resource.set_state_terminating()
            resource.save()
            resource.set_state_terminated()
            resource.save()
            invoice_item = invoices_models.InvoiceItem.objects.filter(
                resource=resource
            ).last()
            self.assertEqual(
                parse_datetime(
                    invoice_item.details['resource_limit_periods'][-1]['end']
                ),
                timezone.now(),
            )


class StorageModeInvoiceTest(BaseTenantInvoiceTest):
    """Switching the offering's storage mode should re-import tenant limits."""

    def setUp(self):
        # Arrange
        # FIX: modernized to zero-argument super() (Py3-only file).
        super().setUp()
        fixture = OpenStackFixture()
        tenant = fixture.openstack_tenant
        offering_component = marketplace_models.OfferingComponent.objects.create(
            offering=self.offering,
            type='gigabytes_gpfs',
            billing_type=marketplace_models.OfferingComponent.BillingTypes.LIMIT,
        )
        plan = self.create_plan(self.prices)
        marketplace_models.PlanComponent.objects.create(
            component=offering_component,
            plan=plan,
            price=10,
        )
        self.resource = marketplace_factories.ResourceFactory(
            offering=self.offering,
            plan=plan,
            limits=self.limits,
            state=marketplace_models.Resource.States.CREATING,
        )
        callbacks.resource_creation_succeeded(self.resource)
        self.resource.scope = tenant
        self.resource.save()
        # Quotas on the backing tenant are what gets imported on mode switch.
        tenant.set_quota_limit('vcpu', 6)
        tenant.set_quota_limit('ram', 10 * 1024)
        tenant.set_quota_usage('storage', 30 * 1024)
        tenant.set_quota_usage('gigabytes_gpfs', 100 * 1024)

    def test_when_storage_mode_is_switched_to_dynamic_limits_are_updated(self):
        # Act
        with freeze_time('2019-09-20'):
            self.offering.plugin_options['storage_mode'] = STORAGE_MODE_DYNAMIC
            self.offering.save()

        # Assert
        self.resource.refresh_from_db()
        self.assertEqual(self.resource.limits.get('cores'), 6)
        self.assertEqual(self.resource.limits.get('ram'), 10 * 1024)
        self.assertEqual(self.resource.limits.get('storage'), None)
        self.assertEqual(self.resource.limits.get('gigabytes_gpfs'), 100 * 1024)
        invoice_item = invoices_models.InvoiceItem.objects.filter(
            resource=self.resource, details__offering_component_type='gigabytes_gpfs'
        ).get()
        last_period = invoice_item.details['resource_limit_periods'][-1]
        self.assertEqual(last_period['quantity'], 100 * 1024)

    def test_when_storage_mode_is_switched_to_fixed_limits_are_updated(self):
        # Act
        with freeze_time('2019-09-20'):
            self.offering.plugin_options['storage_mode'] = STORAGE_MODE_FIXED
            self.offering.save()

        # Assert
        self.resource.refresh_from_db()
        self.assertEqual(self.resource.limits.get('cores'), 6)
        self.assertEqual(self.resource.limits.get('ram'), 10 * 1024)
        self.assertEqual(self.resource.limits.get('storage'), 30 * 1024)
        self.assertEqual(self.resource.limits.get('gigabytes_gpfs'), None)
        invoice_item = invoices_models.InvoiceItem.objects.filter(
            resource=self.resource
        ).last()
        last_period = invoice_item.details['resource_limit_periods'][-1]
        self.assertEqual(last_period['quantity'], 30)

    @mock.patch(
        'waldur_mastermind.marketplace_openstack.utils.import_limits_when_storage_mode_is_switched'
    )
    def test_when_storage_mode_is_not_switched_limits_are_not_updated(
        self, mocked_utils
    ):
        # Act
        with freeze_time('2019-09-20'):
            self.offering.plugin_options['FOO'] = 'BAR'
            self.offering.save()

        # Assert
        self.assertEqual(mocked_utils.call_count, 0)
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.


class HostType:
    """Enumeration of host platform types detected by gpcheck."""
    GPCHECK_HOSTTYPE_UNDEFINED = 0
    GPCHECK_HOSTTYPE_APPLIANCE = 1
    GPCHECK_HOSTTYPE_GENERIC_LINUX = 2
    GPCHECK_HOSTTYPE_GENERIC_SOLARIS = 3


def hosttype_str(type):
    """Return a human-readable name for a HostType value."""
    if type == HostType.GPCHECK_HOSTTYPE_APPLIANCE:
        return "GPDB Appliance"
    elif type == HostType.GPCHECK_HOSTTYPE_GENERIC_LINUX:
        return "Generic Linux Cluster"
    elif type == HostType.GPCHECK_HOSTTYPE_GENERIC_SOLARIS:
        return "Generic Solaris Cluster"
    else:
        return "Undetected Platform"


class omreport:
    """Container for Dell OMSA `omreport` output sections."""
    def __init__(self):
        self.biossetup = dict()         # key value pairs
        self.biossetup_errormsg = None
        self.bootorder = list()         # list of Devices in order of boot
        self.bootorder_errormsg = None
        self.remoteaccess = dict()      # key value pairs
        self.remoteaccess_errormsg = None
        self.vdisks = list()            # list of dicts, 1 for each virtual disk
        self.vdisks_errormsg = None
        self.controller = dict()        # key value pairs
        self.controller_errormsg = None
        self.omversion = None
        self.omversion_errormsg = None
        self.bios = dict()              # key value pairs
        self.bios_errormsg = None
        self.alerts = list()            # list of alerts... each alert is a dictionary of key value pairs
        self.alerts_errormsg = None


class chkconfig:
    """Parsed `chkconfig` output."""
    def __init__(self):
        self.services = dict()  # hash of services, each entry is hash of run levels and boolean value
        self.xinetd = dict()    # hash of services, value is boolean
        self.errormsg = None


class grubconf:
    """Flags extracted from grub.conf serial-console configuration."""
    def __init__(self):
        self.serial_declaration = False
        self.terminal_declaration = False
        self.ttyS1_declaration = False
        self.errormsg = None

    def __str__(self):
        return "serial_declaration(%s) terminal_declaration(%s) ttyS1_declaration(%s)" % (self.serial_declaration, self.terminal_declaration, self.ttyS1_declaration)


class inittab:
    """Settings read from /etc/inittab."""
    def __init__(self):
        self.s1 = False
        self.defaultRunLevel = None
        self.errormsg = None

    def __str__(self):
        return "s1_declaration(%s) default_run_level(%s)" % (self.s1, self.defaultRunLevel)


class connectemc:
    """Raw output of the connectemc service check."""
    def __init__(self):
        self.output = None
        self.errormsg = None

    def __str__(self):
        return self.output


class securetty:
    """Entries found in /etc/securetty."""
    def __init__(self):
        self.errormsg = None
        self.data = set()


class bcu:
    """Brocade CNA firmware/BIOS versions reported by bcu."""
    def __init__(self):
        self.firmware = None
        self.biosversion = None
        self.errormsg = None

    def __str__(self):
        return "firmware_version=%s|biosversion=%s" % (self.firmware, self.biosversion)


class uname:
    """Raw `uname` output."""
    def __init__(self):
        self.output = None
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= UNAME ERROR ===================\n" + self.errormsg
        else:
            return "============= UNAME =========================\n" + self.output


# record machine CPU and memory info
class machine:
    def __init__(self):
        self.total_cpucores = None
        self.memory_in_MB = None
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= CPU / Memory Info ERROR =======\n" + self.errormsg
        else:
            output = "Total CPU cores: %s, Memory: %s MB" % (self.total_cpucores, self.memory_in_MB)
            return "============= CPU / Memory Info =============\n" + output


class hdfs:
    """HDFS heap sizes and site configuration."""
    def __init__(self):
        self.max_heap_size = 0
        self.namenode_heap_size = 0
        self.datanode_heap_size = 0
        self.site_config = dict()
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= HDFS ERROR ====================\n" + self.errormsg
        else:
            # BUG FIX: the namenode/datanode lines previously *assigned* to
            # `output` instead of appending, so only the last heap line and
            # the site config survived.
            output = "max heap size: %sM\n" % self.max_heap_size
            output += "namenode heap size: %sM\n" % self.namenode_heap_size
            output += "datanode heap size: %sM\n" % self.datanode_heap_size
            # sorted(dict) iterates keys on both Python 2 and 3 (was iterkeys()).
            output += "\n".join(["%s = %s" % (k, self.site_config[k]) for k in sorted(self.site_config)])
            return "============= HDFS ==========================\n" + output


class diskusage_entry:
    """One line of `df` output."""
    def __init__(self, fs, size, used, avail, used_percent, mount):
        self.fs = fs
        self.size = size
        self.used = used
        self.avail = avail
        self.used_percent = used_percent
        self.mount = mount

    def __str__(self):
        return "%-40s %8s %8s %8s %8s %-20s" % (self.fs, self.size, self.used, self.avail, self.used_percent, self.mount)


class diskusage:
    """Collected `df` output lines."""
    def __init__(self):
        self.lines = []
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= DISK USAGE ERROR ==============\n" + self.errormsg
        else:
            output = "%-40s %8s %8s %8s %8s %-20s\n" % ("Filesystem", "Size", "Used", "Avail", "Use%", "Mounted on")
            output += "\n".join(str(ln) for ln in self.lines)
            return "============= DISK USAGE ====================\n" + output


class sysctl:
    """Kernel parameters reported by sysctl."""
    def __init__(self):
        self.variables = dict()  # option name => option value
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= SYSCTL ERROR ==================\n" + self.errormsg
        else:
            # sorted(dict) iterates keys on both Python 2 and 3 (was iterkeys()).
            output = '\n'.join('%s = %s' % (k, self.variables[k]) for k in sorted(self.variables))
            return "============= SYSCTL ========================\n" + output


class limitsconf_entry:
    """One line of /etc/security/limits.conf."""
    def __init__(self, domain, type, item, value):
        self.domain = domain
        self.type = type
        self.item = item
        self.value = value

    def __str__(self):
        return "%s %s %s %s" % (self.domain, self.type, self.item, self.value)


class limitsconf:
    """Collected limits.conf entries."""
    def __init__(self):
        self.lines = list()
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= LIMITS ERROR ==================\n" + self.errormsg
        else:
            output = "\n".join(str(ln) for ln in self.lines)
            return "============= LIMITS ========================\n" + output


class mounts:
    """Mounted filesystems keyed by partition."""
    def __init__(self):
        self.entries = dict()  # partition => mount object
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= MOUNT ERROR ===================\n" + self.errormsg
        else:
            output = "\n".join(str(self.entries[k]) for k in sorted(self.entries.keys()))
            return "============= MOUNT =========================\n" + output


class GpMount:
    """A single mount table entry."""
    def __init__(self):
        self.partition = None
        self.dir = None
        self.type = None
        self.options = set()  # mount options

    def __str__(self):
        return "%s on %s type %s (%s)" % (self.partition, self.dir, self.type, ",".join(self.options))


class ioschedulers:
    """I/O scheduler per block device."""
    def __init__(self):
        self.devices = dict()  # device name => scheduler name
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= IO SCHEDULERS ERROR ===========\n" + self.errormsg
        else:
            output = "\n".join("%s: %s" % (k, v) for (k, v) in self.devices.items())
            return "============= IO SCHEDULERS =================\n" + output


class blockdev:
    """Read-ahead value per block device (blockdev --getra)."""
    def __init__(self):
        self.ra = dict()  # device name => getra value
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= BLOCKDEV RA ERROR =============\n" + self.errormsg
        else:
            output = "\n".join("%s: %s" % (k, v) for (k, v) in self.ra.items())
            return "============= BLOCKDEV RA ===================\n" + output


class ntp:
    """NTP daemon status."""
    def __init__(self):
        self.running = False
        self.hosts = set()
        # BUG FIX: was misspelled `self.currentime`, while __str__ reads
        # `self.currenttime` — that raised AttributeError when printed.
        self.currenttime = None
        self.errormsg = None

    def __str__(self):
        if self.errormsg:
            return "============= NTPD ERROR =====================\n" + self.errormsg
        else:
            output = "(running %s) (time %f) (peers: %s)" % (self.running, self.currenttime, self.hosts)
            return "============= NTPD ==========================\n" + output


class rclocal:
    def __init__(self):
        self.isexecutable = False  # check that /etc/rc.d/rc.local is executable permissions

    def __str__(self):
        return "executable(%s)" % self.isexecutable


class solaris_etc_system:
    def __init__(self):
        self.parameters = dict()  # dictionary of values
        self.errormsg = None


class solaris_etc_project:
    def __init__(self):
        self.lines = list()  # list of lines in the file
        self.errormsg = None


class solaris_etc_user_attr:
    def __init__(self):
        self.lines = list()  # list of lines in the file
        self.errormsg = None


class GenericSolarisOutputData:
    """Aggregated check results for a generic Solaris host."""
    def __init__(self):
        self.etc_system = None
        self.etc_project = None
        self.etc_user_attr = None
        self.uname = None

    def __str__(self):
        grc = "============= /etc/system====================\n"
        gre = "============= /etc/system ERRORMSG===========\n"
        output = "%s%s\n%s%s" % (grc, self.etc_system.parameters.__str__(), gre, self.etc_system.errormsg)
        grc = "============= /etc/project===================\n"
        gre = "============= /etc/project ==================\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.etc_project.lines.__str__(), gre, self.etc_project.errormsg)
        grc = "============= /etc/user_att==================\n"
        gre = "============= /etc/user_att==================\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.etc_user_attr.lines.__str__(), gre, self.etc_user_attr.errormsg)
        grc = "============= UNAME==========================\n"
        gre = "============= UNAME ERRORMSG=================\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.uname.__str__(), gre, self.uname.errormsg)
        return output


class GenericLinuxOutputData:
    """Aggregated check results for a generic Linux host."""
    def __init__(self):
        self.uname = None
        self.machine = None
        self.hdfs = None
        self.diskusage = None
        self.sysctl = None
        self.limitsconf = None
        self.mounts = None
        self.ioschedulers = None
        self.blockdev = None
        self.ntp = None

    def __str__(self):
        # Only render the checks that actually ran (non-None members).
        applied_checks = filter(lambda x: x is not None, [
            self.uname, self.machine, self.hdfs, self.diskusage,
            self.sysctl, self.limitsconf, self.mounts,
            self.ioschedulers, self.blockdev, self.ntp
        ])
        return "\n".join(map(str, applied_checks))
class ApplianceOutputData:
    """Aggregated check results for a GPDB appliance host.

    Each attribute is either None (check not run) or the corresponding
    result object defined above; __str__ renders every section in a fixed
    order with banner headers.
    """
    def __init__(self):
        self.chkconfig = None
        self.omreport = None
        self.grubconf = None
        self.mounts = None
        self.inittab = None
        self.uname = None
        self.securetty = None
        self.bcu = None
        self.blockdev = None
        self.rclocal = None
        self.ioschedulers = None
        self.sysctl = None
        self.limitsconf = None
        self.connectemc = None
        self.ntp = None

    def __str__(self):
        # NOTE: every section (except rc.local) emits a value banner followed
        # by an ERRORMSG banner; the exact banner strings (including uneven
        # padding and the 'CONNECtEMC' typo below) are preserved because
        # downstream tooling may match on them.
        ser = "=============SERVICES=======================\n"
        xin = "=============XINETD =======================\n"
        err = "=============CHKCONFIG ERRORMSG=============\n"
        output = "%s%s\n%s%s\n%s%s" % (ser, self.chkconfig.services.__str__(), xin, self.chkconfig.xinetd.__str__(), err, self.chkconfig.errormsg)
        omr = "=============OMREPORT VERSION ==============\n"
        ome = "=============OMREPORT VERSION ERRORMSG======\n"
        output = "%s\n%s%s\n%s%s" % (output, omr, self.omreport.omversion, ome, self.omreport.omversion_errormsg)
        omr = "=============OMREPORT BIOS==================\n"
        ome = "=============OMREPORT BIOS ERRORMSG=========\n"
        output = "%s\n%s%s\n%s%s" % (output, omr, self.omreport.bios.__str__(), ome, self.omreport.bios_errormsg)
        omr = "=============OMREPORT BIOSSETUP=============\n"
        ome = "=============OMREPORT BIOSSETUP ERRORMSG====\n"
        output = "%s\n%s%s\n%s%s" % (output, omr, self.omreport.biossetup.__str__(), ome, self.omreport.biossetup_errormsg)
        omr = "=============OMREPORT CONTROLLER============\n"
        ome = "=============OMREPORT CONTROLLER ERRORMSG===\n"
        output = "%s\n%s%s\n%s%s" % (output, omr, self.omreport.controller.__str__(), ome, self.omreport.controller_errormsg)
        boo = "=============OMREPORT BOOTORDER=============\n"
        boe = "=============OMREPORT BOOTORDER ERRORMSG====\n"
        output = "%s\n%s%s\n%s%s" % (output, boo, self.omreport.bootorder.__str__(), boe, self.omreport.bootorder_errormsg)
        omr = "=============OMREPORT REMOTEACCESS==========\n"
        ome = "=============OMREPORT REMOTEACCESS ERRORMSG=\n"
        output = "%s\n%s%s\n%s%s" % (output, omr, self.omreport.remoteaccess.__str__(), ome, self.omreport.remoteaccess_errormsg)
        omr = "=============OMREPORT ALERTS==========\n"
        ome = "=============OMREPORT ALERTS ERRORMSG=\n"
        output = "%s\n%s%s\n%s%s" % (output, omr, self.omreport.alerts.__str__(), ome, self.omreport.alerts_errormsg)
        omr = "=============OMREPORT VIRTUAL DISKS=========\n"
        ome = "=============OMREPORT VIRTUAL DISKS ERRORMSG\n"
        output = "%s\n%s%s\n%s%s" % (output, omr, self.omreport.vdisks.__str__(), ome, self.omreport.vdisks_errormsg)
        grc = "============= GRUB.CONF======================\n"
        gre = "============= GRUB.CONF ERRORMSG=============\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.grubconf.__str__(), gre, self.grubconf.errormsg)
        grc = "============= SYSCTL=========================\n"
        gre = "============= SYSCTL ERRORMSG================\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.sysctl.variables.__str__(), gre, self.sysctl.errormsg)
        grc = "============= LIMITS=========================\n"
        gre = "============= LIMITS ERRORMSG================\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.limitsconf.__str__(), gre, self.limitsconf.errormsg)
        mnt = "============= MOUNT==========================\n"
        mte = "============= MOUNT ERRORMSG=================\n"
        output = "%s\n%s%s\n%s%s" % (output, mnt, self.mounts.__str__(), mte, self.mounts.errormsg)
        grc = "============= INITTAB========================\n"
        gre = "============= INITTAB ERRORMSG===============\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.inittab.__str__(), gre, self.inittab.errormsg)
        grc = "============= UNAME==========================\n"
        gre = "============= UNAME ERRORMSG=================\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.uname.__str__(), gre, self.uname.errormsg)
        grc = "============= CONNECTEMC=====================\n"
        gre = "============= CONNECtEMC ERRORMSG============\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.connectemc.__str__(), gre, self.connectemc.errormsg)
        grc = "============= SECURETTY======================\n"
        gre = "============= SECURETTY ERRORMSG=============\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.securetty.data.__str__(), gre, self.securetty.errormsg)
        grc = "============= IO SCHEDULERS==================\n"
        gre = "============= IO SCHEDULERS ERRORMSG========\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.ioschedulers.devices.__str__(), gre, self.ioschedulers.errormsg)
        grc = "============= BLOCKDEV RA ====================\n"
        gre = "============= BLOCKDEV RA ERRORMSG============\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.blockdev.ra.__str__(), gre, self.blockdev.errormsg)
        grc = "============= BCU CNA ========================\n"
        gre = "============= BCU CNA ERRORMSG================\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.bcu.__str__(), gre, self.bcu.errormsg)
        # rc.local has no separate error section.
        grc = "============= /etc/rc.d/rc.local =============\n"
        output = "%s\n%s%s" % (output, grc, self.rclocal.__str__())
        grc = "============= NTPD ===========================\n"
        gre = "============= NTPD ERRORMSG===================\n"
        output = "%s\n%s%s\n%s%s" % (output, grc, self.ntp.__str__(), gre, self.ntp.errormsg)
        return output
"""The WaveBlocks Project IOM plugin providing functions for handling linear combinations of Hagedorn wavepackets. @author: R. Bourquin @copyright: Copyright (C) 2013, 2016 R. Bourquin @license: Modified BSD License """ import numpy as np def add_lincombhawp(self, parameters, timeslots=None, lincombsize=None, wavepacketsize=None, blockid=0, key=("q", "p", "Q", "P", "S")): r"""Add storage for the linear combination of Hagedorn wavepackets. :param parameters: An :py:class:`ParameterProvider` instance with at least the keys ``dimension`` and ``ncomponents``. :param timeslots: The number of time slots we need. Can be set to ``None`` to get automatically growing datasets. :param lincombsize: The (maximal) size ``J`` of the linear combination of wavepackets. If specified this remains fixed for all timeslots. Can be set to ``None`` (default) to get automatically growing datasets. :param wavepacketsize: The (maximal) basis shape size ``K`` of each of the wavepackets. If specified this remains fixed for all timeslots. Can be set to ``None`` (default) to get automatically growing datasets. :param blockid: The ID of the data block to operate on. :param key: Specify which parameters to save. All are independent. :type key: Tuple of valid identifier strings that are ``q``, ``p``, ``Q``, ``P``, ``S`` and ``adQ``. Default is ``("q", "p", "Q", "P", "S")``. 
""" N = parameters["ncomponents"] D = parameters["dimension"] # TODO: Handle multi-component packets assert N == 1 if timeslots is None: T = 0 Ts = None else: T = timeslots Ts = timeslots if lincombsize is None: J = 0 Js = None csJs = 32 else: J = lincombsize Js = lincombsize csJs = min(32, Js) if wavepacketsize is None: K = 0 Ks = None csKs = 8 else: K = wavepacketsize Ks = wavepacketsize csKs = min(8, Ks) # The overall group containing all lincombwp data grp_lc = self._srf[self._prefixb + str(blockid)].require_group("lincombhawp") # The group for storing the wavepacket basis shapes grp_lc.create_group("basisshapes") # The group for storing the wavepacket parameter set Pi grp_wppi = grp_lc.create_group("Pi") # The group for storing the wavepacket coefficients grp_wpci = grp_lc.create_group("wp_coefficients") # Create the dataset with appropriate parameters daset_tg_lc = grp_lc.create_dataset("timegrid_lc_coefficients", (T,), dtype=np.integer, chunks=True, maxshape=(Ts,), fillvalue=-1) grp_lc.create_dataset("timegrid_wp_parameters", (T,), dtype=np.integer, chunks=True, maxshape=(Ts,), fillvalue=-1) grp_lc.create_dataset("timegrid_wp_coefficients", (T,), dtype=np.integer, chunks=True, maxshape=(Ts,), fillvalue=-1) grp_lc.create_dataset("lincomb_size", (T,), dtype=np.integer, chunks=True, maxshape=(Ts,), fillvalue=J) # Linear combination coefficients grp_lc.create_dataset("lc_coefficients", (T, J), dtype=np.complexfloating, chunks=(1, csJs), maxshape=(Ts, Js)) # Linear combination wavepackets grp_lc.create_dataset("basis_shapes_hashes", (T, J, N), dtype=np.integer, chunks=(1, csJs, 1), maxshape=(Ts, Js, N)) grp_lc.create_dataset("basis_sizes", (T, J, N), dtype=np.integer, chunks=(1, csJs, 1), maxshape=(Ts, Js, N)) # Wavepacket parameters if "q" in key and "q" not in grp_wppi.keys(): grp_wppi.create_dataset("q", (T, J, D), dtype=np.complexfloating, chunks=(1, csJs, D), maxshape=(Ts, Js, D)) if "p" in key and "p" not in grp_wppi.keys(): grp_wppi.create_dataset("p", (T, 
J, D), dtype=np.complexfloating, chunks=(1, csJs, D), maxshape=(Ts, Js, D)) if "Q" in key and "Q" not in grp_wppi.keys(): grp_wppi.create_dataset("Q", (T, J, D, D), dtype=np.complexfloating, chunks=(1, csJs, D, D), maxshape=(Ts, Js, D, D)) if "P" in key and "P" not in grp_wppi.keys(): grp_wppi.create_dataset("P", (T, J, D, D), dtype=np.complexfloating, chunks=(1, csJs, D, D), maxshape=(Ts, Js, D, D)) if "S" in key and "S" not in grp_wppi.keys(): grp_wppi.create_dataset("S", (T, J, 1), dtype=np.complexfloating, chunks=(1, csJs, 1), maxshape=(Ts, Js, 1)) # Wavepacket coefficients for i in range(N): grp_wpci.create_dataset("c_" + str(i), (T, J, K), dtype=np.complexfloating, chunks=(1, csJs, csKs), maxshape=(Ts, Js, Ks)) # Attach pointer to timegrid daset_tg_lc.attrs["pointer"] = 0 grp_wppi.attrs["pointer"] = 0 grp_wpci.attrs["pointer"] = 0 def delete_lincombhawp(self, blockid=0): r"""Remove the stored linear combination. :param blockid: The ID of the data block to operate on. """ try: del self._srf[self._prefixb + str(blockid) + "/lincombhawp"] except KeyError: pass def has_lincombhawp(self, blockid=0): r"""Ask if the specified data block has the desired data tensor. :param blockid: The ID of the data block to operate on. """ return "lincombhawp" in self._srf[self._prefixb + str(blockid)].keys() def save_lincombhawp_description(self, descr, blockid=0): r"""Save the description of this linear combination. :param descr: The description. :param blockid: The ID of the data block to operate on. """ pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp" # Save the description for key, value in descr.items(): self._srf[pathd].attrs[key] = self._save_attr_value(value) def save_lincombhawp_coefficients(self, coefficients, timestep, blockid=0): r"""Save the coefficients of the linear combination to a file. :param coefficients: The coefficients of the linear combination of wavepackets. :type coefficients: A single, suitable ``ndarray``. 
:param timestep: The timestep at which we save the data. :param blockid: The ID of the data block to operate on. """ pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_lc_coefficients" pathlcs = "/" + self._prefixb + str(blockid) + "/lincombhawp/lincomb_size" pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp/lc_coefficients" timeslot = self._srf[pathtg].attrs["pointer"] # Write the data self.must_resize(pathlcs, timeslot) J = np.size(coefficients) self._srf[pathlcs][timeslot] = J self.must_resize(pathd, timeslot) if not J == 0: self.must_resize(pathd, J - 1, axis=1) self._srf[pathd][timeslot, :J] = np.squeeze(coefficients) # Write the timestep to which the stored values belong into the timegrid self.must_resize(pathtg, timeslot) self._srf[pathtg][timeslot] = timestep # Update the pointer self._srf[pathtg].attrs["pointer"] += 1 def save_lincombhawp_wavepacket_parameters(self, parameters, timestep, blockid=0, key=("q", "p", "Q", "P", "S")): r"""Save the parameter set :math:`\Pi` of the Hagedorn wavepacket :math:`\Psi` to a file. :param parameters: The parameter set of the Hagedorn wavepacket. :type parameters: A ``list`` containing the (five) ``ndarrays`` like :math:`(q,p,Q,P,S)` :param timestep: The timestep at which we save the data. :param blockid: The ID of the data block to operate on. :param key: Specify which parameters to save. All are independent. :type key: Tuple of valid identifier strings that are ``q``, ``p``, ``Q``, ``P``, ``S`` and ``adQ``. Default is ``("q", "p", "Q", "P", "S")``. 
""" pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_wp_parameters" pathlcs = "/" + self._prefixb + str(blockid) + "/lincombhawp/lincomb_size" pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp/Pi/" timeslot = self._srf[pathd].attrs["pointer"] # TODO: This an assumption based on data layout and stable J = parameters[0].shape[0] # Write the basis size self.must_resize(pathlcs, timeslot) self._srf[pathlcs][timeslot] = J # Write the parameters for key, item in zip(key, parameters): self.must_resize(pathd + key, timeslot) self.must_resize(pathd + key, J - 1, axis=1) self._srf[pathd + key][timeslot, :J, ...] = item # Write the timestep to which the stored values belong into the timegrid self.must_resize(pathtg, timeslot) self._srf[pathtg][timeslot] = timestep # Update the pointer self._srf[pathd].attrs["pointer"] += 1 def save_lincombhawp_wavepacket_coefficients(self, coefficients, basisshapes, timestep=None, blockid=0): r"""Save the coefficients of the Hagedorn wavepacket linear combination to a file. Warning: we do only save tha hash of the basis shapes here! You have to save the basis shape with the corresponding function too. :param coefficients: The coefficients of the Hagedorn wavepacket. :type coefficients: A ``list`` with :math:`N` suitable ``ndarrays``. :param basisshapes: The corresponding basis shapes of the Hagedorn wavepacket. :type basisshapes: A ``list`` with :math:`N` :py:class:`BasisShape` subclass instances. :param timestep: The timestep at which we save the data. :param blockid: The ID of the data block to operate on. 
""" pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_wp_coefficients" pathlcs = "/" + self._prefixb + str(blockid) + "/lincombhawp/lincomb_size" pathbsi = "/" + self._prefixb + str(blockid) + "/lincombhawp/basis_sizes" pathbsh = "/" + self._prefixb + str(blockid) + "/lincombhawp/basis_shapes_hashes" pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp/wp_coefficients/" timeslot = self._srf[pathd].attrs["pointer"] # Write the lincomb size basissizes = [K.get_basis_size() for K in basisshapes] J = len(basissizes) self.must_resize(pathlcs, timeslot) self._srf[pathlcs][timeslot] = J # Write all basis sizes self.must_resize(pathbsi, timeslot) self.must_resize(pathbsi, J - 1, axis=1) self._srf[pathbsi][timeslot, :J, 0] = np.array(basissizes) # Write basis shape hashes basisshapeshashes = np.array([hash(K) for K in basisshapes]) self.must_resize(pathbsh, timeslot) self.must_resize(pathbsh, J - 1, axis=1) self._srf[pathbsh][timeslot, :J, 0] = basisshapeshashes # Write the wavepackets coefficients data coefficients = np.atleast_2d(coefficients) j, k = coefficients.shape # TODO: Allow wavepackets with multiple components index = 0 pathc = pathd + "c_" + str(index) # Do we have to resize due to changed number of packets or coefficients self.must_resize(pathc, timeslot) self.must_resize(pathc, j - 1, axis=1) self.must_resize(pathc, k - 1, axis=2) self._srf[pathc][timeslot, :j, :k] = coefficients # Write the timestep to which the stored values belong into the timegrid self.must_resize(pathtg, timeslot) self._srf[pathtg][timeslot] = timestep # Update the pointer self._srf[pathd].attrs["pointer"] += 1 def save_lincombhawp_wavepacket_basisshapes(self, basisshapes, blockid=0): r"""Save the basis shapes of the linear combination of Hagedorn wavepacket to a file. :param basisshapes: A list of the basis shapes of the linear combination. :param blockid: The ID of the data block to operate on. 
""" pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp/basisshapes/" for basisshape in basisshapes: ha = hash(basisshape) name = "basis_shape_" + str(ha) # Chech if we already stored this basis shape if name not in self._srf[pathd].keys(): # TODO: Consider storing all hashes in one big dataset # Create new data set daset = self._srf[pathd].create_dataset("basis_shape_" + str(ha), (1,), dtype=np.integer) daset[0] = ha # Save the description descr = basisshape.get_description() for key, value in descr.items(): daset.attrs[key] = self._save_attr_value(value) def load_lincombhawp_description(self, blockid=0): r"""Load the description of this linear combination. :param blockid: The ID of the data block to operate on. """ pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp" # Load and return all descriptions available descr = {} for key, value in self._srf[pathd].attrs.items(): descr[key] = self._load_attr_value(value) return descr def load_lincombhawp_timegrid(self, blockid=0, key=("coeffs", "packets")): r"""Load the timegrid of this linear combination. :param blockid: The ID of the data block to operate on. :param key: Specify which linear combination timegrids to load. All are independent. :type key: Tuple of valid identifier strings that are ``ceoffs`` and ``packets``. Default is ``("coeffs", "packets")``. """ tg = [] for item in key: if item == "coeffs": pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_lc_coefficients" tg.append(self._srf[pathtg][:]) elif item == "packets": pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_lc_packets" tg.append(self._srf[pathtg][:]) if len(tg) == 1: return tg[0] else: return tuple(tg) def load_lincombhawp_size(self, timestep=None, blockid=0): r"""Load the size (number of packets) of this linear combination. :param timestep: Load only the data of this timestep. :param blockid: The ID of the data block to operate on. 
""" pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_lc_coefficients" pathlcs = "/" + self._prefixb + str(blockid) + "/lincombhawp/lincomb_size" if timestep is not None: index = self.find_timestep_index(pathtg, timestep) return self._srf[pathlcs][index] else: index = slice(None) return self._srf[pathlcs][index] def load_lincombhawp_coefficients(self, timestep=None, blockid=0): r"""Load the coefficients of this linear combination. :param timestep: Load only the data of this timestep. :param blockid: The ID of the data block to operate on. """ pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_lc_coefficients" pathlcs = "/" + self._prefixb + str(blockid) + "/lincombhawp/lincomb_size" pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp/lc_coefficients" if timestep is not None: index = self.find_timestep_index(pathtg, timestep) J = self._srf[pathlcs][index] return self._srf[pathd][index, :J] else: index = slice(None) return self._srf[pathd][index, :] def load_lincombhawp_wavepacket_parameters(self, timestep=None, blockid=0, key=("q", "p", "Q", "P", "S")): r"""Load the wavepacket parameters. :param timestep: Load only the data of this timestep. :param blockid: The ID of the data block to operate on. :param key: Specify which parameters to load. All are independent. :type key: Tuple of valid identifier strings that are ``q``, ``p``, ``Q``, ``P``, ``S`` and ``adQ``. Default is ``("q", "p", "Q", "P", "S")``. """ pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_wp_parameters" pathlcs = "/" + self._prefixb + str(blockid) + "/lincombhawp/lincomb_size" pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp/Pi/" if timestep is not None: index = self.find_timestep_index(pathtg, timestep) J = self._srf[pathlcs][index] params = tuple([self._srf[pathd + k][index, :J, ...] for k in key]) else: params = tuple([self._srf[pathd + k][:, :, ...] 
for k in key]) return params def load_lincombhawp_wavepacket_coefficients(self, timestep=None, get_hashes=False, blockid=0): r"""Load the wavepacket coefficients. :param timestep: Load only the data of this timestep. :param get_hashes: Return the corresponding basis shape hashes. :param blockid: The ID of the data block to operate on. """ pathtg = "/" + self._prefixb + str(blockid) + "/lincombhawp/timegrid_wp_coefficients" pathlcs = "/" + self._prefixb + str(blockid) + "/lincombhawp/lincomb_size" pathbsh = "/" + self._prefixb + str(blockid) + "/lincombhawp/basis_shapes_hashes" pathbsi = "/" + self._prefixb + str(blockid) + "/lincombhawp/basis_sizes" pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp/wp_coefficients/" # TODO: Allow wavepackets with multiple components i = 0 if timestep is not None: index = self.find_timestep_index(pathtg, timestep) Js = slice(0, self._srf[pathlcs][index]) Ks = slice(0, np.max(self._srf[pathbsi][index, :, 0])) else: index = slice(None) Js = slice(None) Ks = slice(None) # Load the hash data if get_hashes is True: hashes = self._srf[pathbsh][index, Js] # Load the coefficient data data = self._srf[pathd + "c_" + str(i)][index, Js, Ks] if get_hashes is True: return (hashes, data) else: return data def load_lincombhawp_wavepacket_basisshapes(self, the_hash=None, blockid=0): r"""Load the basis shapes by hash. :param the_hash: The hash of the basis shape whose description we want to load. :param blockid: The ID of the data block to operate on. """ pathd = "/" + self._prefixb + str(blockid) + "/lincombhawp/basisshapes/" if the_hash is None: # Load and return all descriptions available descrs = {} for ahash in self._srf[pathd].keys(): # TODO: What data exactly do we want to return? descr = {} for key, value in self._srf[pathd + ahash].attrs.items(): descr[key] = self._load_attr_value(value) # 'ahash' is "basis_shape_..." and we want only the "..." 
part descrs[int(ahash[12:])] = descr return descrs else: the_hash = int(the_hash) name = "basis_shape_" + str(the_hash) # Chech if we already stored this basis shape if name in self._srf[pathd].keys(): # TODO: What data exactly do we want to return? descr = {} for key, value in self._srf[pathd + name].attrs.items(): descr[key] = self._load_attr_value(value) return descr else: raise IndexError("No basis shape with given hash {}".format(hash)) # # The following two methods are only for convenience and are NOT particularly efficient. # def load_lincombhawp(self, timestep, blockid=0, key=("q", "p", "Q", "P", "S")): r"""Load a linear combination at a given timestep and return a fully configured :py:class:`LinearCombinationOfHAWPs` instance. This method just calls some other :py:class:`IOManager` methods in the correct order. It is included only for convenience and is not particularly efficient. :param timestep: The timestep :math:`n` we load the wavepacket. :param blockid: The ID of the data block to operate on. :return: A :py:class:`LinearCombinationOfHAWPs` instance. 
""" from WaveBlocksND.LinearCombinationOfHAWPs import LinearCombinationOfHAWPs from WaveBlocksND.BlockFactory import BlockFactory BF = BlockFactory() descr = self.load_lincombhawp_description(blockid=blockid) # Empty linear combination J = self.load_lincombhawp_size(timestep=timestep, blockid=blockid) if J == 0: return None # A new and empty linear combination LC = LinearCombinationOfHAWPs(descr["dimension"], descr["ncomponents"], descr["eps"], number_packets=J) # Basis shapes K_descrs = self.load_lincombhawp_wavepacket_basisshapes(blockid=blockid) K = {ha: BF.create_basis_shape(de) for ha, de in K_descrs.items()} # Coefficients and basis shape hashes hashes, coeffs = self.load_lincombhawp_wavepacket_coefficients(timestep=timestep, get_hashes=True, blockid=blockid) Ks = [K[ha] for ha in np.squeeze(hashes)] LC.set_wavepacket_coefficients(coeffs, Ks) # Parameters Pi = self.load_lincombhawp_wavepacket_parameters(timestep=timestep, blockid=blockid, key=key) LC.set_wavepacket_parameters(Pi) # Cj Cj = self.load_lincombhawp_coefficients(timestep=timestep, blockid=blockid) LC.set_coefficients(Cj) return LC def save_lincombhawp(self, lincomb, timestep, blockid=0): r"""Save a linear combination of Hagedorn wavepackets at a given timestep and read all data to save from the :py:class:`LinearCombinationOfHAWPs` instance provided. This method just calls some other :py:class:`IOManager` methods in the correct order. It is included only for convenience and is not particularly efficient. We assume the linear combination is already set up with the correct :py:meth:`add_lincombhawp` method call. :param lincomb: The :py:class:`LinearCombinationOfHAWPs` instance we want to save. :param timestep: The timestep :math:`n` at which we save the linear combination. :param blockid: The ID of the data block to operate on. 
""" # Description self.save_lincombhawp_description(lincomb.get_description(), blockid=blockid) # Wavepackets Ks = lincomb.get_basis_shapes() self.save_lincombhawp_wavepacket_basisshapes(Ks, blockid=blockid) Pi = lincomb.get_wavepacket_parameters() self.save_lincombhawp_wavepacket_parameters(Pi, timestep=timestep, blockid=blockid) Ck = lincomb.get_wavepacket_coefficients() self.save_lincombhawp_wavepacket_coefficients(Ck, Ks, timestep=timestep, blockid=blockid) # Coefficients Cj = lincomb.get_coefficients() self.save_lincombhawp_coefficients(Cj, timestep=timestep, blockid=blockid)
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """File to run unititest on models.py""" import os import sys sys.path.append(os.path.abspath('codes/')) import unittest import tensorflow as tf from trainer import input_pipeline_dask as test from trainer import models from shutil import copyfile CSV_PATH = os.path.abspath('data/iris_formatted.csv') TASK_TYPE = 'classification' TASK_TYPE_2 = 'regression' TARGET_VAR = 'label' COLUMN_TO_DROP = 'Cluster_indices' TASK_NAME = 'classification' NUM_EPOCHS = 1 BATCH_SIZE = 4 BUFFER_SIZE = 4 NAME = 'Train' NA_VALUES = None MODE = 23 class Params(object): """Class to give parameters to model_fn""" def __init__( self, feature_names): """Initialize the class with parameters Arguments: feature_names : list of string, names of each feature """ self.degree = 2 self.batch_size = 64 self.feature_names = feature_names self.learning_rate = 0.03 self.num_of_clusters = 3 self.optimizer = tf.train.FtrlOptimizer(learning_rate=0.03) def __getitem__( self, key): """Initialise getitem Arguments: key : key to be added """ return getattr(self, key) class TestModel(unittest.TestCase): """Class to perform unititest""" def is_not_used(self): """"Function to remove no-self-use warning""" pass def _create_deep_cols(self, feat_cols): """Create deep cols Arguments: feat_cols : list of string, list of names of feature columns """ self.is_not_used() deep_cols = list() for i in feat_cols: if i.dtype == 'string': i = 
tf.feature_column.indicator_column(i) deep_cols.append(i) else: deep_cols.append(i) return deep_cols def init_inputreader(self): '''Initialise class InputReader''' self.is_not_used() return test.InputReader( csv_path=CSV_PATH, task_type=TASK_TYPE, target_var=TARGET_VAR, gcs_path=False, na_values=NA_VALUES ) def init_inputreader_2(self): """ Initialise class InputReader """ self.is_not_used() return test.InputReader( csv_path=CSV_PATH, task_type=TASK_TYPE_2, target_var=TARGET_VAR, gcs_path=False, na_values=NA_VALUES ) def init_basicstats(self): """ Initialise class BasicStats """ self.is_not_used() return test.BasicStats() def get_feature_columns(self): """ Initialise class Dataset """ iread = self.init_inputreader() stats = self.init_basicstats() ddf, _ = iread._parse_csv() copyfile(CSV_PATH, '/tmp/data.csv') ddf, mean, std_dev, csv_defaults = stats.clean_data( df=ddf, target_var=TARGET_VAR, task_type=TASK_TYPE, name=TASK_NAME ) mapped = stats.find_vocab(ddf) data = test.DatasetInput( NUM_EPOCHS, BATCH_SIZE, BUFFER_SIZE, csv_defaults, ddf.columns, TARGET_VAR, TASK_TYPE ) return data._create_feature_columns( mapped, mean, std_dev ) def get_feature_columns_2(self): """ Initialise class Dataset """ iread = self.init_inputreader_2() stats = self.init_basicstats() ddf, _ = iread._parse_csv() copyfile(CSV_PATH, '/tmp/data.csv') ddf, mean, std_dev, csv_defaults = stats.clean_data( df=ddf, target_var=TARGET_VAR, task_type=TASK_TYPE_2, name=TASK_NAME ) mapped = stats.find_vocab(ddf) data = test.DatasetInput( num_epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, buffer_size=BUFFER_SIZE, csv_defaults=csv_defaults, csv_cols=ddf.columns, target_var=TARGET_VAR, task_type=TASK_TYPE_2) return data._create_feature_columns( dictionary=mapped, mean=mean, std_dev=std_dev) def init_canned_model(self): """ Initialise class CannedModel """ model_name = 'linearclassifier' feature_columns = self.get_feature_columns() return models.CannedModel( model_name=model_name, feature_columns=feature_columns 
) def test_canned_build_model(self): """ Test function build_model in CannedModel """ model = self.init_canned_model() estimator_model = model.build_model() self.assertIsInstance(estimator_model, tf.estimator.LinearClassifier) def test_linear_classifier(self): """ Test function linearclassifier in CannedModel """ model = self.init_canned_model() estimator_model = model.linear_classifier() self.assertIsInstance(estimator_model, tf.estimator.LinearClassifier) def init_canned_model_1(self): """ Initialise CannedModel """ model_name = 'dnnclassifier' feature_columns = self.get_feature_columns() deep_cols = self._create_deep_cols(feature_columns) return models.CannedModel( model_name=model_name, feature_columns=feature_columns, deep_columns=deep_cols ) def test_canned_build_model_1(self): """ Test function build_model in CannedModel """ model = self.init_canned_model_1() estimator_model = model.build_model() self.assertIsInstance(estimator_model, tf.estimator.DNNClassifier) def test_dnn_classifier(self): """ Test function dnnclassifier in CannedModel """ model = self.init_canned_model_1() estimator_model = model.dnn_classifier() self.assertIsInstance(estimator_model, tf.estimator.DNNClassifier) def init_canned_model_2(self): """ Initialise CannedModel """ model_name = 'combinedclassifier' feature_columns = self.get_feature_columns() deep_cols = self._create_deep_cols(feature_columns) return models.CannedModel( model_name=model_name, feature_columns=feature_columns, deep_columns=deep_cols ) def test_canned_build_model_2(self): """ Test function build_model in CannedModel """ model = self.init_canned_model_2() estimator_model = model.build_model() self.assertIsInstance( estimator_model, tf.estimator.DNNLinearCombinedClassifier) def test_combined_classifier(self): """ Test function combinedclassifier in CannedModel """ model = self.init_canned_model_2() estimator_model = model.combined_classifier() self.assertIsInstance( estimator_model, 
tf.estimator.DNNLinearCombinedClassifier) def init_canned_model_3(self): """ Test function linearregressor in CannedModel """ model_name = 'linearregressor' feature_columns = self.get_feature_columns_2() return models.CannedModel( model_name=model_name, feature_columns=feature_columns ) def test_canned_build_model_3(self): """ Test function build_model in CannedModel """ model = self.init_canned_model_3() estimator_model = model.build_model() self.assertIsInstance( estimator_model, tf.estimator.LinearRegressor) def test_linear_regressor(self): """ Test function linearregressor in CannedModel """ model = self.init_canned_model_3() estimator_model = model.linear_regressor() self.assertIsInstance( estimator_model, tf.estimator.LinearRegressor) def init_canned_model_4(self): """ Initialise CannedModel """ model_name = 'dnnregressor' feature_columns = self.get_feature_columns_2() deep_cols = self._create_deep_cols(feature_columns) return models.CannedModel( model_name=model_name, feature_columns=feature_columns, deep_columns=deep_cols ) def test_canned_build_model_4(self): """ Test function build_model in CannedModel """ model = self.init_canned_model_4() estimator_model = model.build_model() self.assertIsInstance(estimator_model, tf.estimator.DNNRegressor) def test_dnn_regressor(self): """ Test function dnnregressor in CannedModel """ model = self.init_canned_model_4() estimator_model = model.dnn_regressor() self.assertIsInstance(estimator_model, tf.estimator.DNNRegressor) def init_canned_model_5(self): """ Initialise CannedModel """ model_name = 'combinedregressor' feature_columns = self.get_feature_columns_2() deep_cols = self._create_deep_cols(feature_columns) return models.CannedModel( model_name=model_name, feature_columns=feature_columns, deep_columns=deep_cols ) def test_canned_build_model_5(self): """ Test function build_model in CannedModel """ model = self.init_canned_model_5() estimator_model = model.build_model() self.assertIsInstance( estimator_model, 
tf.estimator.DNNLinearCombinedRegressor) def test_combined_regressor(self): """ Test function combinedregressor in CannedModel """ model = self.init_canned_model_5() estimator_model = model.combined_regressor() self.assertIsInstance( estimator_model, tf.estimator.DNNLinearCombinedRegressor) def init_custom_model(self): """ Initialise class CustomModel """ self.is_not_used() custom_model_name = 'polynomialclassifier' batch_size = 64 feature_names = ['id', 'a', 'b', 'c', 'd'] optimizer = tf.train.FtrlOptimizer( learning_rate=0.001 ) return models.CustomModel( model_name=custom_model_name, batch_size=batch_size, optimizer=optimizer, feature_names=feature_names, model_dir=None, config=None, warm_start_from=None, polynomial_degree=2 ) def test_Custom_build_model(self): """ Test function build_model in CustomModel """ model = self.init_custom_model() estimator_model = model.build_model() self.assertIsInstance(estimator_model, tf.estimator.Estimator) def init_dataset(self): """ Initialise class Dataset """ iread = self.init_inputreader() stats = self.init_basicstats() ddf, _ = iread._parse_csv() copyfile(CSV_PATH, '/tmp/data.csv') ddf, _, _, csv_defaults = stats.clean_data( df=ddf, target_var=TARGET_VAR, task_type=TASK_TYPE, name=TASK_NAME) return test.DatasetInput( num_epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, buffer_size=BUFFER_SIZE, csv_defaults=csv_defaults, csv_cols=ddf.columns, target_var=TARGET_VAR, task_type=TASK_TYPE) def get_features(self): """Get features and labels from input_fn""" data = self.init_dataset() feat, label = data.input_fn(NAME) return feat, label def test_poly_regression_model_fn(self): """ Test poly_regression_model_fn """ model = self.init_custom_model() iread = self.init_inputreader() _, list_cols = iread._parse_csv() list_cols.remove(TARGET_VAR) params = Params(list_cols) features, labels = self.get_features() train_mode = tf.estimator.ModeKeys.TRAIN eval_mode = tf.estimator.ModeKeys.EVAL predict_mode = tf.estimator.ModeKeys.PREDICT # 
train_mode estimator_spec = model.poly_regression_model_fn( features=features, labels=labels, mode=train_mode, params=params) self.assertIsInstance(estimator_spec, tf.estimator.EstimatorSpec) # eval_mode estimator_spec = model.poly_regression_model_fn( features=features, labels=labels, mode=eval_mode, params=params) self.assertIsInstance(estimator_spec, tf.estimator.EstimatorSpec) # predict_mode estimator_spec = model.poly_regression_model_fn( features=features, labels=labels, mode=predict_mode, params=params) self.assertIsInstance(estimator_spec, tf.estimator.EstimatorSpec) def test_poly_classification_model_fn(self): """ Test poly_classification_model_fn """ model = self.init_custom_model() iread = self.init_inputreader() _, list_cols = iread._parse_csv() list_cols.remove(TARGET_VAR) params = Params(list_cols) features, labels = self.get_features() train_mode = tf.estimator.ModeKeys.TRAIN eval_mode = tf.estimator.ModeKeys.EVAL predict_mode = tf.estimator.ModeKeys.PREDICT # train_mode estimator_spec = model.poly_classification_model_fn( features=features, labels=labels, mode=train_mode, params=params) self.assertIsInstance(estimator_spec, tf.estimator.EstimatorSpec) # eval_mode estimator_spec = model.poly_classification_model_fn( features=features, labels=labels, mode=eval_mode, params=params) self.assertIsInstance(estimator_spec, tf.estimator.EstimatorSpec) # predict_mode estimator_spec = model.poly_classification_model_fn( features=features, labels=labels, mode=predict_mode, params=params) self.assertIsInstance(estimator_spec, tf.estimator.EstimatorSpec) if __name__ == "__main__": unittest.main()
""" Miscellaneous methods that help in different aspects of model validation. Does not require explicit instantiation. The following methods are available: ======================================= ==================================== Action Method ======================================= ==================================== View JSON data in web browser :meth:`view_json_tree` Prepare test for execution :meth:`prepare_run_test_offline` Run the validation test :meth:`run_test_offline` Register result with validation service :meth:`upload_test_result` Run test and register result :meth:`run_test` Generate HTML report of test results :meth:`generate_HTML_report` Generate PDF report of test results :meth:`generate_PDF_report` Obtain score matrix for test results :meth:`generate_score_matrix` Get Pandas DataFrame from score matrix :meth:`get_raw_dataframe` Display score matrix in web browser :meth:`display_score_matrix_html` ======================================= ==================================== """ import os import uuid import json import pickle import webbrowser import argparse import collections import unicodedata import pkg_resources try: raw_input except NameError: # Python 3 raw_input = input unicode = str # Python 3 renamed the unicode type to str import sciunit from datetime import datetime from . import TestLibrary, ModelCatalog from .datastores import CollabDataStore, URI_SCHEME_MAP try: # Python 3 from urllib.parse import urlparse except ImportError: # Python 2 from urlparse import urlparse from importlib import import_module import mimetypes import math try: from pathlib import Path except ImportError: from pathlib2 import Path # Python 2 backport def view_json_tree(data): """Displays the JSON tree structure inside the web browser This method can be used to view any JSON data, generated by any of the validation client's methods, in a tree-like representation. Parameters ---------- data : string JSON object represented as a string. 
    Returns
    -------
    None
        Does not return any data. JSON displayed inside web browser.

    Examples
    --------
    >>> model = model_catalog.get_model(alias="HCkt")
    >>> from hbp_validation_framework import utils
    >>> utils.view_json_tree(model)
    """
    _make_js_file(data)
    script_dir = os.path.dirname(__file__)
    rel_path = "jsonTreeViewer/index.htm"
    abs_file_path = os.path.join(script_dir, rel_path)
    # new=2 asks the browser to open the viewer in a new tab, if possible
    webbrowser.open(abs_file_path, new=2)

def _make_js_file(data):
    """
    Creates a JavaScript file from given JSON object; loaded by the browser

    This eliminates cross-origin issues with loading local data files (e.g. via jQuery)
    """
    script_dir = os.path.dirname(__file__)
    rel_path = "jsonTreeViewer/data.js"
    abs_file_path = os.path.join(script_dir, rel_path)
    with open(abs_file_path, 'w') as outfile:
        # NOTE(review): the serialized JSON is embedded between single quotes;
        # data containing a single quote would yield invalid JavaScript --
        # TODO confirm the tree viewer input can never contain one.
        outfile.write("var data = '")
        json.dump(data, outfile)
        outfile.write("'")

def prepare_run_test_offline(username="", password=None, environment="production", test_instance_id="", test_id="", test_alias="", test_version="", client_obj=None, **params):
    """Gather info necessary for running validation test

    This method will select the specified test and prepare a config file enabling
    offline execution of the validation test. The observation file required by
    the test is also downloaded and stored locally.

    The test can be specified in the following ways (in order of priority):

    1. specify `test_instance_id` corresponding to test instance in test library
    2. specify `test_id` and `test_version`
    3. specify `test_alias` and `test_version`

    Note: for (2) and (3) above, if `test_version` is not specified,
    then the latest test version is retrieved

    Parameters
    ----------
    username : string
        Your HBP Collaboratory username.
    password : string
        Your HBP Collaboratory password.
    environment : string, optional
        Used to indicate whether being used for development/testing purposes.
        Set as `production` as default for using the production system,
        which is appropriate for most users. When set to `dev`, it uses the
        `development` system.
For other values, an external config file would be read (the latter is currently not implemented). test_instance_id : UUID System generated unique identifier associated with test instance. test_id : UUID System generated unique identifier associated with test definition. test_alias : string User-assigned unique identifier associated with test definition. test_version : string User-assigned identifier (unique for each test) associated with test instance. client_obj : ModelCatalog/TestLibrary object Used to easily create a new ModelCatalog/TestLibrary object if either exist already. Avoids need for repeated authentications; improves performance. Also, helps minimize being blocked out by the authentication server for repeated authentication requests (applicable when running several tests in quick succession, e.g. in a loop). **params : list Keyword arguments to be passed to the Test constructor. Note ---- Should be run on node having access to external URLs (i.e. with internet access) Returns ------- path The absolute path of the generated test config file Examples -------- >>> test_config_file = utils.prepare_run_test_offline(username="shailesh", test_alias="CDT-5", test_version="5.0") """ if client_obj: test_library = TestLibrary.from_existing(client_obj) else: test_library = TestLibrary(username, password, environment=environment) if test_instance_id == "" and test_id == "" and test_alias == "": raise Exception("test_instance_id or test_id or test_alias needs to be provided for finding test.") # Gather specified test info test_instance_json = test_library.get_test_instance(instance_id=test_instance_id, test_id=test_id, alias=test_alias, version=test_version) test_id = test_instance_json["test_definition_id"] test_instance_id = test_instance_json["id"] test_instance_path = test_instance_json["path"] # Download test observation to local storage test_observation_path = test_library.get_test_definition(test_id=test_id)["data_location"] parse_result = 
urlparse(test_observation_path) datastore = URI_SCHEME_MAP[parse_result.scheme](auth=test_library.auth) base_folder = os.path.join(os.getcwd(), "hbp_validation_framework", test_id, datetime.now().strftime("%Y%m%d-%H%M%S")) test_observation_file = datastore.download_data([test_observation_path], local_directory=base_folder)[0] # Create test config required for offline execution test_info = {} test_info["test_id"] = test_id test_info["test_instance_id"] = test_instance_id test_info["test_instance_path"] = test_instance_path test_info["test_observation_file"] = os.path.basename(os.path.realpath(test_observation_file)) test_info["params"] = params # Save test info to config file test_config_file = os.path.join(base_folder, "test_config.json") with open(test_config_file, 'w') as file: file.write(json.dumps(test_info, indent=4)) return test_config_file def run_test_offline(model="", test_config_file=""): """Run the validation test This method will accept a model, located locally, run the test specified via the test config file (generated by :meth:`prepare_run_test_offline`), and store the results locally. Parameters ---------- model : sciunit.Model A :class:`sciunit.Model` instance. test_config_file : string Absolute path of the test config file generated by :meth:`prepare_run_test_offline` Note ---- Can be run on node(s) having no access to external URLs (i.e. without internet access). Also, it is required that the test_config_file and the test_observation_file are located in the same directory. 
Returns ------- path The absolute path of the generated test result file Examples -------- >>> test_result_file = utils.run_test_offline(model=model, test_config_file=test_config_file) """ if not os.path.isfile(test_config_file) : raise Exception("'test_config_file' should direct to file describing the test configuration.") base_folder = os.path.dirname(os.path.realpath(test_config_file)) # Load the test info from config file with open(test_config_file) as file: test_info = json.load(file) # Identify test class path path_parts = test_info["test_instance_path"].split(".") cls_name = path_parts[-1] module_name = ".".join(path_parts[:-1]) test_module = import_module(module_name) test_cls = getattr(test_module, cls_name) # Read observation data required by test with open(os.path.join(base_folder, test_info["test_observation_file"]), 'rb') as file: observation_data = file.read() content_type = mimetypes.guess_type(test_info["test_observation_file"])[0] if content_type == "application/json": observation_data = json.loads(observation_data) # Create the :class:`sciunit.Test` instance params = test_info["params"] test = test_cls(observation=observation_data, **params) test.uuid = test_info["test_instance_id"] print("----------------------------------------------") print("Test name: ", test.name) print("Test type: ", type(test)) print("----------------------------------------------") # Check the model if not isinstance(model, sciunit.Model): raise TypeError("`model` is not a sciunit Model!") print("----------------------------------------------") print("Model name: ", model.name) print("Model type: ", type(model)) print("----------------------------------------------") # Run the test t_start = datetime.utcnow() score = test.judge(model, deep_error=True) t_end = datetime.utcnow() print("----------------------------------------------") print("Score: ", score.score) if "figures" in score.related_data: print("Output files: ") for item in score.related_data["figures"]: 
def upload_test_result(username="", password=None, environment="production",
                       test_result_file="", storage_collab_id="",
                       register_result=True, client_obj=None):
    """Register a previously generated test result with the Validation Service

    Loads the pickled score produced by :meth:`run_test_offline` and registers
    it with the HBP validation framework, storing any output files in the
    requested Collab (or in the model's host Collab when none is given).

    Parameters
    ----------
    username : string
        Your HBP Collaboratory username.
    password : string
        Your HBP Collaboratory password.
    environment : string, optional
        `production` (default) for the production system, `dev` for the
        development system; other values would read an external config file
        (the latter is currently not implemented).
    test_result_file : string
        Absolute path of the test result file generated by :meth:`run_test_offline`
    storage_collab_id : string
        Collab ID where output files should be stored; if empty, stored in
        model's host Collab.
    register_result : boolean
        Specify whether the test results are to be scored on the validation
        framework. Default is set as True.
    client_obj : ModelCatalog/TestLibrary object
        Existing client whose authentication should be reused; avoids repeated
        authentication requests (useful when running many tests in a loop).

    Note
    ----
    Should be run on node having access to external URLs (i.e. with internet access)

    Returns
    -------
    UUID
        UUID of the test result that has been created; `None` when
        `register_result` is False.
    object
        score object evaluated by the test; `None` when `register_result` is False.

    Examples
    --------
    >>> result_id, score = utils.upload_test_result(username="shailesh", test_result_file=test_result_file)
    """
    if not register_result:
        return None, None
    if not os.path.isfile(test_result_file):
        raise Exception("'test_result_file' should direct to file containg the test result data.")

    # recover the pickled score written by run_test_offline()
    with open(test_result_file, 'rb') as result_fh:
        score = pickle.load(result_fh)

    # reuse an already-authenticated client when one is supplied
    model_catalog = ModelCatalog.from_existing(client_obj) if client_obj \
        else ModelCatalog(username, password, environment=environment)

    model_instance_uuid = model_catalog.find_model_instance_else_add(score.model)
    model_instance_json = model_catalog.get_model_instance(instance_id=model_instance_uuid)
    model_json = model_catalog.get_model(model_id=model_instance_json["model_id"])
    model_name = model_json["name"]

    # default storage location is the model's host Collab
    if not storage_collab_id:
        storage_collab_id = model_json["app"]["collab_id"]
    score.related_data["project"] = storage_collab_id

    test_library = TestLibrary.from_existing(model_catalog)
    # NOTE(review): a duplicate-result guard based on a hash of
    # (model instance, test instance, score, runtime, timestamp) used to live
    # here; it compared against existing results via test_library.list_results
    # and raised when an identical result was already registered. Re-enable it
    # if duplicate submissions become a problem.

    # `.replace(" ", "_")` used to avoid Collab storage path errors due to spaces
    collab_folder = "validation_results/{}/{}_{}".format(
        datetime.now().strftime("%Y-%m-%d"),
        model_name.replace(" ", "_"),
        datetime.now().strftime("%Y%m%d-%H%M%S"))
    collab_storage = CollabDataStore(collab_id=storage_collab_id,
                                     base_folder=collab_folder,
                                     auth=test_library.auth)

    response = test_library.register_result(test_result=score, data_store=collab_storage)
    return response, score
def run_test(username="", password=None, environment="production", model="",
             test_instance_id="", test_id="", test_alias="", test_version="",
             storage_collab_id="", register_result=True, client_obj=None, **params):
    """Run validation test and register result

    Convenience wrapper that chains, in order, relaying the output of one
    step to the next:

    1. :meth:`prepare_run_test_offline`
    2. :meth:`run_test_offline`
    3. :meth:`upload_test_result`

    Parameters
    ----------
    username : string
        Your HBP Collaboratory username.
    password : string
        Your HBP Collaboratory password.
    environment : string, optional
        `production` (default) for the production system, `dev` for the
        development system; other values would read an external config file
        (the latter is currently not implemented).
    model : sciunit.Model
        A :class:`sciunit.Model` instance.
    test_instance_id : UUID
        System generated unique identifier associated with test instance.
    test_id : UUID
        System generated unique identifier associated with test definition.
    test_alias : string
        User-assigned unique identifier associated with test definition.
    test_version : string
        User-assigned identifier (unique for each test) associated with test instance.
    storage_collab_id : string
        Collab ID where output files should be stored; if empty, stored in
        model's host Collab.
    register_result : boolean
        Specify whether the test results are to be scored on the validation
        framework. Default is set as True.
    client_obj : ModelCatalog/TestLibrary object
        Existing client whose authentication should be reused; avoids repeated
        authentication requests (useful when running many tests in a loop).
    **params : list
        Keyword arguments to be passed to the Test constructor.

    Note
    ----
    Should be run on node having access to external URLs (i.e. with internet access)

    Returns
    -------
    UUID
        UUID of the test result that has been created.
    object
        score object evaluated by the test.

    Examples
    --------
    >>> result_id, score = utils.run_test(username="HBP_USERNAME", password="HBP_PASSWORD",
                                          environment="production", model=cell_model,
                                          test_alias="basalg_msn_d1", test_version="1.0",
                                          storage_collab_id="8123", register_result=True)
    """
    # step 1: fetch the test definition and write a local config file
    config_path = prepare_run_test_offline(username=username, password=password,
                                           environment=environment,
                                           test_instance_id=test_instance_id,
                                           test_id=test_id, test_alias=test_alias,
                                           test_version=test_version,
                                           client_obj=client_obj, **params)
    # step 2: evaluate the model against the test (no network access needed)
    result_path = run_test_offline(model=model, test_config_file=config_path)
    # step 3: register the pickled score with the validation framework
    return upload_test_result(username=username, password=password,
                              environment=environment,
                              test_result_file=result_path,
                              storage_collab_id=storage_collab_id,
                              register_result=register_result,
                              client_obj=client_obj)
def generate_HTML_report(username="", password=None, environment="production",
                         model_list=None, model_instance_list=None,
                         test_list=None, test_instance_list=None,
                         result_list=None, collab_id=None, client_obj=None):
    """Generates an HTML report for specified test results

    This method will generate an HTML report for the specified test results.

    Parameters
    ----------
    username : string
        Your HBP collaboratory username.
    password : string
        Your HBP Collaboratory password.
    environment : string, optional
        Used to indicate whether being used for development/testing purposes.
        Set as `production` as default for using the production system,
        which is appropriate for most users. When set to `dev`, it uses the
        `development` system. For other values, an external config file would
        be read (the latter is currently not implemented).
    model_list : list
        List of model UUIDs or aliases for which the report is to be generated.
    model_instance_list : list
        List of model instance UUIDs for which the report is to be generated.
    test_list : list
        List of test UUIDs or aliases for which the report is to be generated.
    test_instance_list : list
        List of test instance UUIDs for which the report is to be generated.
    result_list : list
        List of result UUIDs for which the report is to be generated.
    collab_id : string, optional
        Collaboratory ID where hyperlinks to results are to be redirected.
        If unspecified, these data units will not have clickable hyperlinks.
    client_obj : ModelCatalog/TestLibrary object
        Existing client whose authentication should be reused; avoids repeated
        authentication requests (useful when running many tests in a loop).

    Returns
    -------
    string
        The absolute path of the generated HTML report
    list
        List of valid UUIDs for which the HTML report was generated

    Examples
    --------
    >>> result_list = ["a618a6b1-e92e-4ac6-955a-7b8c6859285a", "793e5852-761b-4801-84cb-53af6f6c1acf"]
    >>> report_path, valid_uuids = utils.generate_HTML_report(username="shailesh", result_list=result_list)
    """
    try:
        from jinja2 import Environment, FileSystemLoader
    except ImportError:
        print("Please install the following package: Jinja2")
        return

    # BUG FIX: these parameters previously used mutable default arguments
    # ([]) and were extended in place below, so entries leaked between
    # successive calls and caller-supplied lists were mutated. Work on
    # local copies instead (None defaults preserve the old call syntax).
    model_list = list(model_list) if model_list else []
    test_list = list(test_list) if test_list else []
    model_instance_list = list(model_instance_list) if model_instance_list else []
    test_instance_list = list(test_instance_list) if test_instance_list else []
    result_list = list(result_list) if result_list else []

    if client_obj:
        model_catalog = ModelCatalog.from_existing(client_obj)
    else:
        model_catalog = ModelCatalog(username, password, environment=environment)
    test_library = TestLibrary.from_existing(model_catalog)

    if collab_id:
        # check if apps exist in the target Collab; if not then create them,
        # so the generated hyperlinks have somewhere to point
        MCapp_navID = model_catalog.exists_in_collab_else_create(collab_id)
        model_catalog.set_app_config(collab_id=collab_id, app_id=MCapp_navID, only_if_new="True")
        VFapp_navID = test_library.exists_in_collab_else_create(collab_id)
        test_library.set_app_config(collab_id=collab_id, app_id=VFapp_navID, only_if_new="True")

    # retrieve all model instances from specified models (UUIDs or aliases)
    for entry in model_list:
        try:
            uuid.UUID(entry, version=4)
            data = model_catalog.list_model_instances(model_id=entry)
        except ValueError:
            data = model_catalog.list_model_instances(alias=entry)
        for item in data:
            model_instance_list.append(item["id"])

    # retrieve all test instances from specified tests (UUIDs or aliases)
    for entry in test_list:
        try:
            uuid.UUID(entry, version=4)
            data = test_library.list_test_instances(test_id=entry)
        except ValueError:
            data = test_library.list_test_instances(alias=entry)
        for item in data:
            test_instance_list.append(item["id"])

    # extend results list to include all results corresponding to above
    # identified model instances and test instances
    for item in model_instance_list:
        results_json = test_library.list_results(model_version_id=item)["results"]
        result_list.extend([r["id"] for r in results_json])
    for item in test_instance_list:
        results_json = test_library.list_results(test_code_id=item)["results"]
        result_list.extend([r["id"] for r in results_json])

    # remove duplicate result UUIDs while preserving order
    result_list = list(collections.OrderedDict.fromkeys(result_list).keys())
    # TODO: ensure at least one valid result UUID to be evaluated; else create no report, alert user

    # utilize each result entry
    result_summary_table = []  # list of dicts, each with 4 keys -> result_id, model_label, test_label, score
    list_results = []
    list_models = []
    list_model_instances = []
    list_tests = []
    list_test_instances = []
    valid_result_uuids = []
    for r_id in result_list:
        result = test_library.get_result(result_id=r_id)["results"]
        if len(result) == 0:
            continue  # invalid result UUID
        result = result[0]
        valid_result_uuids.append(r_id)
        model_instance = result.pop("model_version")
        test_instance = result.pop("test_code")
        model = model_instance.pop("model")
        test = test_instance.pop("test_definition")
        list_results.append(result)
        list_models.append(model)
        list_model_instances.append(model_instance)
        list_tests.append(test)
        list_test_instances.append(test_instance)
        # labels: prefer alias, fall back to name; append the instance version
        model_label = (model["alias"] if model["alias"] else model["name"]) + " (" + str(model_instance["version"]) + ")"
        test_label = (test["alias"] if test["alias"] else test["name"]) + " (" + str(test_instance["version"]) + ")"
        if collab_id:
            result_url = "https://collab.humanbrainproject.eu/#/collab/{}/nav/{}?state=result.{}".format(str(collab_id), str(VFapp_navID), r_id)
            model_url = "https://collab.humanbrainproject.eu/#/collab/{}/nav/{}?state=model.{}".format(str(collab_id), str(MCapp_navID), model["id"])
            test_url = "https://collab.humanbrainproject.eu/#/collab/{}/nav/{}?state=test.{}".format(str(collab_id), str(VFapp_navID), test["id"])
            result_summary_table.append({"result_id": (r_id, result_url),
                                         "model_label": (model_label, model_url),
                                         "test_label": (test_label, test_url),
                                         "score": (result["score"], result_url)})
        else:
            result_summary_table.append({"result_id": (r_id),
                                         "model_label": (model_label),
                                         "test_label": (test_label),
                                         "score": (result["score"])})

    timestamp = datetime.now()
    report_name = str("HBP_VF_Report_" + timestamp.strftime("%Y%m%d-%H%M%S") + ".html")
    template_path = pkg_resources.resource_filename("hbp_validation_framework", "templates/report_template.html")
    env = Environment(loader=FileSystemLoader(os.path.dirname(template_path)))
    template = env.get_template(os.path.basename(template_path))
    template_vars = {"report_name": report_name,
                     "created_date": timestamp.strftime("%Y-%m-%d %H:%M:%S"),
                     "result_summary_table": result_summary_table,
                     "list_results": list_results,
                     "list_models": list_models,
                     "list_model_instances": list_model_instances,
                     "list_tests": list_tests,
                     "list_test_instances": list_test_instances}
    html_out = template.render(template_vars)
    with open(report_name, "w") as outfile:
        outfile.write(html_out)
    return os.path.abspath(report_name), valid_result_uuids
def generate_PDF_report(html_report_path=None, username="", password=None,
                        environment="production", model_list=[],
                        model_instance_list=[], test_list=[],
                        test_instance_list=[], result_list=[], collab_id=None,
                        only_results=False, client_obj=None):
    """Generates a PDF report for specified test results

    This method will generate a PDF report for the specified test results.

    Parameters
    ----------
    html_report_path : string
        Path to HTML report generated via :meth:`generate_HTML_report()`.
        If specified, then all other parameters (except `only_results`)
        are irrelevant. If not specified, then this method will generate
        both an HTML report as well as a PDF report.
    username : string
        Your HBP collaboratory username.
    password : string
        Your HBP Collaboratory password.
    environment : string, optional
        Used to indicate whether being used for development/testing purposes.
        Set as `production` as default for using the production system,
        which is appropriate for most users. When set to `dev`, it uses the
        `development` system. For other values, an external config file would
        be read (the latter is currently not implemented).
    model_list : list
        List of model UUIDs or aliases for which the report is to be generated.
    model_instance_list : list
        List of model instance UUIDs for which the report is to be generated.
    test_list : list
        List of test UUIDs or aliases for which the report is to be generated.
    test_instance_list : list
        List of test instance UUIDs for which the report is to be generated.
    result_list : list
        List of result UUIDs for which the report is to be generated.
    collab_id : string, optional
        Collaboratory ID where hyperlinks to results are to be redirected.
        If unspecified, these data units will not have clickable hyperlinks.
    only_results : boolean, optional
        Indicates whether output PDF should contain only result related info.
        Set to `False` as default. When set to `True`, the PDF will have info
        on the result, model, model instance, test and test instance.
    client_obj : ModelCatalog/TestLibrary object
        Existing client whose authentication should be reused; avoids repeated
        authentication requests (useful when running many tests in a loop).

    Returns
    -------
    string
        The absolute path of the generated PDF report
    list
        List of valid UUIDs for which the PDF report was generated;
        returns `None` if `html_report_path` is set

    Examples
    --------
    >>> result_list = ["a618a6b1-e92e-4ac6-955a-7b8c6859285a", "793e5852-761b-4801-84cb-53af6f6c1acf"]
    >>> report_path, valid_uuids = utils.generate_PDF_report(username="shailesh", result_list=result_list)
    >>> report_path, valid_uuids = utils.generate_PDF_report(html_report_path="report.html", only_results=True)
    """
    # must stay the first statement: snapshot of the call arguments that is
    # forwarded to generate_HTML_report() when no ready-made HTML is supplied
    params = locals()
    try:
        from pyppdf import save_pdf
    except ImportError:
        print("Please install the following package: pyppdf")
        return
    if only_results:
        # fail fast on the optional dependency before doing any real work
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            print("To use 'only_results=True', please install the following package: beautifulsoup4")
            return

    valid_result_uuids = None
    if not html_report_path:
        params.pop("html_report_path")
        params.pop("only_results")
        # BUG FIX: copy the list arguments before forwarding them;
        # generate_HTML_report() extends its list arguments in place, so the
        # shared mutable defaults ([]) accumulated entries across calls and
        # caller-supplied lists were silently mutated.
        for key in ("model_list", "model_instance_list", "test_list",
                    "test_instance_list", "result_list"):
            params[key] = list(params[key])
        html_report_path, valid_result_uuids = generate_HTML_report(**params)

    with open(html_report_path, "r") as html_file:
        html_string = html_file.read()

    # Exchanging the order of these JS files is sufficient to remove the
    # 'tabs' organization of info in HTML file to a sequential layout
    script_jquery = "https://code.jquery.com/jquery-3.3.1.js"
    script_materialize = "https://cdnjs.cloudflare.com/ajax/libs/materialize/0.97.3/js/materialize.min.js"
    html_string = html_string.replace(script_materialize, script_jquery, 1)
    html_string = html_string.replace(script_jquery, script_materialize, 1)

    if only_results:
        html_soup = BeautifulSoup(html_string, "html.parser")
        # remove tabs navigation bar
        for item in html_soup.findAll("ul", {"class": "tabs"}):
            item.parent.decompose()
        # remove model and test tabs
        for item in html_soup.findAll('div', id=lambda x: x and x.startswith(('model_', 'test_'))):
            item.decompose()
        html_string = html_soup

    filepath = os.path.splitext(os.path.abspath(html_report_path))[0] + ".pdf"
    save_pdf(output_file=filepath, html=html_string,
             args_dict={"pdf": {"format": "A4", "landscape": False,
                                "printBackground": True,
                                "margin": {"top": '0.25in', "right": '0.25in',
                                           "bottom": '0.25in', "left": '0.25in'}}},
             goto="temp")
    return filepath, valid_result_uuids
def generate_score_matrix(username="", password=None, environment="production",
                          model_list=None, model_instance_list=None,
                          test_list=None, test_instance_list=None,
                          result_list=None, collab_id=None, client_obj=None):
    """Generates a styled pandas dataframe with score matrix

    This method will generate a styled pandas dataframe for the specified
    test results. Each row will correspond to a particular model instance,
    and the columns correspond to the test instances.

    Parameters
    ----------
    username : string
        Your HBP collaboratory username.
    password : string
        Your HBP Collaboratory password.
    environment : string, optional
        Used to indicate whether being used for development/testing purposes.
        Set as `production` as default for using the production system,
        which is appropriate for most users. When set to `dev`, it uses the
        `development` system. For other values, an external config file would
        be read (the latter is currently not implemented).
    model_list : list
        List of model UUIDs or aliases for which score matrix is to be generated.
    model_instance_list : list
        List of model instance UUIDs for which score matrix is to be generated.
    test_list : list
        List of test UUIDs or aliases for which score matrix is to be generated.
    test_instance_list : list
        List of test instance UUIDs for which score matrix is to be generated.
    result_list : list
        List of result UUIDs for which score matrix is to be generated.
    collab_id : string, optional
        Collaboratory ID where hyperlinks to results are to be redirected.
        If unspecified, the scores will not have clickable hyperlinks.
    client_obj : ModelCatalog/TestLibrary object
        Existing client whose authentication should be reused; avoids repeated
        authentication requests (useful when running many tests in a loop).

    Note
    ----
    Only the latest score entry from specified input for a particular model
    instance and test instance combination will be selected.
    To get the raw (unstyled) dataframe, use :meth:`get_raw_dataframe()`

    Returns
    -------
    pandas.io.formats.style.Styler
        A 2-dimensional matrix representation of the scores
    list
        List of result UUIDs from specified input that were superseded by a
        newer result for the same combination and thus ignored

    Examples
    --------
    >>> result_list = ["a618a6b1-e92e-4ac6-955a-7b8c6859285a", "793e5852-761b-4801-84cb-53af6f6c1acf"]
    >>> styled_df, excluded = utils.generate_score_matrix(username="shailesh", result_list=result_list)
    """
    try:
        import pandas as pd
    except ImportError:
        print("Please install the following package: pandas")
        return

    # BUG FIX: these parameters previously used mutable default arguments
    # ([]) and were extended in place below, so entries leaked between
    # successive calls and caller-supplied lists were mutated. Work on
    # local copies instead (None defaults preserve the old call syntax).
    model_list = list(model_list) if model_list else []
    test_list = list(test_list) if test_list else []
    model_instance_list = list(model_instance_list) if model_instance_list else []
    test_instance_list = list(test_instance_list) if test_instance_list else []
    result_list = list(result_list) if result_list else []

    if client_obj:
        model_catalog = ModelCatalog.from_existing(client_obj)
        test_library = TestLibrary.from_existing(client_obj)
    else:
        model_catalog = ModelCatalog(username, password, environment=environment)
        # consistency with generate_HTML_report(): reuse the catalog's
        # authentication instead of authenticating a second time
        test_library = TestLibrary.from_existing(model_catalog)

    if collab_id:
        # check if app exists; if not then create
        VFapp_navID = test_library.exists_in_collab_else_create(collab_id)
        test_library.set_app_config(collab_id=collab_id, app_id=VFapp_navID, only_if_new="True")

    # retrieve all model instances from specified models (UUIDs or aliases)
    for entry in model_list:
        try:
            uuid.UUID(entry, version=4)
            data = model_catalog.list_model_instances(model_id=entry)
        except ValueError:
            data = model_catalog.list_model_instances(alias=entry)
        for item in data:
            model_instance_list.append(item["id"])

    # retrieve all test instances from specified tests (UUIDs or aliases)
    for entry in test_list:
        try:
            uuid.UUID(entry, version=4)
            data = test_library.list_test_instances(test_id=entry)
        except ValueError:
            data = test_library.list_test_instances(alias=entry)
        for item in data:
            test_instance_list.append(item["id"])

    # extend results list to include all results corresponding to above
    # identified model instances and test instances
    for item in model_instance_list:
        results_json = test_library.list_results(model_version_id=item)["results"]
        result_list.extend([r["id"] for r in results_json])
    for item in test_instance_list:
        results_json = test_library.list_results(test_code_id=item)["results"]
        result_list.extend([r["id"] for r in results_json])

    # remove duplicate result UUIDs while preserving order
    result_list = list(collections.OrderedDict.fromkeys(result_list).keys())

    results_dict = collections.OrderedDict()         # test_code_id -> {model_version_id: [timestamp, "score#*#result_id"]}
    model_instances_dict = collections.OrderedDict() # model_version_id -> row label
    test_instances_dict = collections.OrderedDict()  # test_code_id -> column label
    excluded_results = []  # not latest entry for a particular model instance and test instance combination
    for r_id in result_list:
        result = test_library.get_result(result_id=r_id)["results"][0]
        t_id = result["test_code_id"]
        m_id = result["model_version_id"]
        # '#*#' is used as separator between score and result UUID
        # (latter used for constructing hyperlink)
        entry = [result["timestamp"], str(result["score"]) + "#*#" + r_id]
        if t_id in results_dict:
            if m_id not in results_dict[t_id]:
                results_dict[t_id][m_id] = entry
            elif result["timestamp"] > results_dict[t_id][m_id][0]:
                # newer result supersedes the stored one
                excluded_results.append(results_dict[t_id][m_id][1].split('#*#')[1])
                results_dict[t_id][m_id] = entry
            else:
                excluded_results.append(r_id)
        else:
            results_dict[t_id] = {m_id: entry}
        if m_id not in model_instances_dict:
            model_instances_dict[m_id] = None
        # BUG FIX: this membership test previously (and wrongly) checked
        # model_instances_dict, so test instances were never tracked correctly
        if t_id not in test_instances_dict:
            test_instances_dict[t_id] = None

    # update results_dict values to contain only scores; remove timestamps
    for key_test_inst in results_dict:
        for key_model_inst, value in results_dict[key_test_inst].items():
            results_dict[key_test_inst][key_model_inst] = value[1]

    # form test labels: test_name (version_name)
    for t_id in test_instances_dict:
        test = test_library.get_test_instance(instance_id=t_id)
        test_version = test["version"]
        test = test_library.get_test_definition(test_id=test["test_definition_id"])
        test_name = test["alias"] if test["alias"] else test["name"]
        test_instances_dict[t_id] = test_name + " (" + str(test_version) + ")"

    # form model labels: model_name (version_name)
    for m_id in model_instances_dict:
        model = model_catalog.get_model_instance(instance_id=m_id)
        model_version = model["version"]
        model = model_catalog.get_model(model_id=model["model_id"])
        model_name = model["alias"] if model["alias"] else model["name"]
        # consistency fix: space before "(" to match the test labels above
        # and the labels used in generate_HTML_report()
        model_instances_dict[m_id] = model_name + " (" + str(model_version) + ")"

    data = {}
    for t_key, t_val in test_instances_dict.items():
        # None for (model instance, test instance) pairs without a result
        data[t_val] = [results_dict[t_key].get(m_key) for m_key in model_instances_dict]
    df = pd.DataFrame(data, index=list(model_instances_dict.values()))

    def make_clickable(value):
        # each populated cell is "score#*#result_uuid"
        if not value:
            return value
        score, result_uuid = value.split('#*#')
        if collab_id:
            result_url = "https://collab.humanbrainproject.eu/#/collab/{}/nav/{}?state=result.{}".format(str(collab_id), str(VFapp_navID), result_uuid)
            return '<a target="_blank" href="{}">{}</a>'.format(result_url, score)
        else:
            return score

    return df.style.format(make_clickable), excluded_results
The cell values in latter could contain additional data (i.e. result UUIDs) for creating hyperlinks. This is filtered out here such that the cell values only contain scores. Parameters ---------- styled_df : pandas.io.formats.style.Styler Styled DataFrame object generated by :meth`generate_score_matrix` Returns ------- pandas.core.frame.DataFrame A 2-dimensional matrix representation of the scores without any formatting Examples -------- >>> df = utils.get_raw_dataframe(styled_df) """ def make_raw_scores(value): if value: return value.split('#*#')[0] return styled_df.data.applymap(make_raw_scores) def display_score_matrix_html(styled_df=None, df=None): """Displays score matrix generated from :meth`generate_score_matrix` inside web browser This method displays the scoring matrix generated by :meth`generate_score_matrix` inside a web browser. Input can either be the styled DataFrame object generated by :meth`generate_score_matrix` or the raw DataFrame object from :meth`get_raw_dataframe`. Parameters ---------- styled_df : pandas.io.formats.style.Styler Styled DataFrame object generated by :meth`generate_score_matrix` df : pandas.core.frame.DataFrame DataFrame object generated by :meth`get_raw_dataframe` Returns ------- None Does not return any data. JSON displayed inside web browser. Examples -------- >>> utils.display_score_matrix_html(styled_df) """ if styled_df is None and df is None: raise Exception("styled_df or df needs to be provided for displaying the score matrix.") filename = "hbp_vf_score_dataframe_{}.html".format(datetime.now().strftime("%Y%m%d-%H%M%S")) if styled_df: df = get_raw_dataframe(styled_df) df.to_html(filename) webbrowser.open(filename, new=2)
def med_filt(chan, kernel=11, thresh=3):
    '''
    Identifies fractures within the signal by thresholding. Uses a median
    filtering method to subtract the baseline from the absolute value of
    the signal, then keeps chunk-wise maxima rising more than `thresh`
    standard deviations above the mean (via sliding_max).

    Inputs:
        chan - (nparray) input signal array
        optional
        -----------
        kernel (int) kernel size of the median filter (must be odd).
        thresh (int) the number of standard deviations above the mean to
        consider the identified peak a fracture.

    Outputs:
        (nparray) times (seconds, assuming a 48 kHz sample rate) of the
        identified fractures within the signal.
        NOTE: despite the original docstring, indices are converted to
        times before returning. Also plots the corrected signal with the
        fractures marked.
    '''
    channel = abs(chan)
    filtChannel = signal.medfilt(channel, kernel_size=kernel)
    # subtract the median-filtered baseline
    corr_chan = channel - filtChannel
    fractures = sliding_max(corr_chan, 3000, thresh)
    # TODO: the 48 kHz sample rate is hard-coded here and in cwt_ridges
    time = 1 / 48000 * np.linspace(0, len(corr_chan), len(corr_chan))
    plt.plot(time, corr_chan)
    plt.plot([time[fractures], time[fractures]], [min(corr_chan), max(corr_chan)], 'r')
    return time[fractures]


def sliding_max(chan, kernel_size, threshold):
    '''
    Identifies local maximum within the signal by chunking the signal.
    Throws out maximums that are less than a threshold defined using the
    mean and standard deviation of the signal.

    Inputs:
        chan - (nparray) input signal array
        optional
        -----------
        kernel_size (int) the size of the chunks.
        threshold (int) the number of standard deviations above the mean to
        consider the identified peak a fracture.

    Outputs:
        fractures (list) indicies of the identified fractures within the signal
    '''
    fractures = []
    cutoff = np.mean(abs(chan)) + threshold * np.std(abs(chan))
    # zero-pad so the signal divides evenly into kernel_size chunks
    # (note: a full extra zero block is added when it already divides evenly;
    # harmless, since zero samples can never exceed the cutoff)
    pad = kernel_size - (len(chan) % kernel_size)
    pad_chan = np.hstack((chan, np.zeros((pad,))))
    # BUG FIX: use floor division; '/' is true division under Python 3 and
    # yields a float, which np.reshape/np.linspace reject as a dimension
    dim = len(pad_chan) // kernel_size
    res_chan = np.reshape(abs(pad_chan), (dim, kernel_size))
    # per-chunk argmax, offset back into whole-signal indices
    ind = np.argmax(res_chan, axis=1)
    indind = np.linspace(0, len(pad_chan) - kernel_size, dim)
    fract = (np.array((ind + indind), dtype=int)).tolist()
    for frac in fract:
        if abs(pad_chan[frac]) > cutoff:
            fractures.append(frac)
    return fractures


def cwt_ridges(chan, dwnsmp_rat=48, max_width=20, thresh=3):
    '''
    Identifies fractures within the signal using the continuous wavelet
    transform on the downsampled signal (significant downsampling is
    required in order to perform the cwt on most machines).

    Inputs:
        chan - (nparray) input signal array
        optional
        -----------
        dwnsmp_rat (int) the fraction of the original sample rate.
        max_width (int) the maximum wavelet width considered.
        thresh (int) the number of standard deviations above the mean to
        consider the identified peak a fracture.

    Outputs:
        (nparray) times (seconds, at the downsampled rate) of the
        identified fractures within the signal. Also plots the downsampled
        signal with the fractures marked.
    '''
    fractures = []
    # downsamples signal in order to perform the transform
    dec_chan = signal.decimate(chan, dwnsmp_rat)
    Fs = 48000 / dwnsmp_rat
    dec_time = 1 / Fs * np.linspace(0, len(dec_chan), len(dec_chan))
    # wavelet widths
    widths = np.linspace(1, max_width, 10)
    peakInd = signal.find_peaks_cwt(abs(dec_chan), widths, noise_perc=.1,
                                    min_snr=1, min_length=3)
    plt.plot(dec_time, dec_chan)
    lmin = min(dec_chan)
    ulim = max(dec_chan)
    for peak in peakInd:
        # keep only peaks well above the signal's mean absolute level
        if abs(dec_chan[peak]) > np.mean(abs(dec_chan)) + thresh * np.std(abs(dec_chan)):
            fractures.append(peak)
            plt.plot([dec_time[peak], dec_time[peak]], [lmin, ulim], 'r')
    return dec_time[fractures]
Connects local maxima for each frequency bin of the spectrogram matrix, . Looks for vertical lines of a given length within the frequency content. The goal is to find broadband noises within the signal (fractures). Inputs: chan - (nparray) input signal array optional gap_thresh (int) the maximum number of freq bins that can be skipped, while still considering the ridge line connected. min_length (int) the minumum length of ridge lines to be considered a fracture. Outputs: fractures (list) indicies of the identified fractures within the signal ''' fractures = [] Pxx, freqs, bins, im = plt.specgram(chan, NFFT=512, Fs=48000, noverlap=0) ridge_lines = identify_ridge_lines(Pxx, 0*np.ones(len(bins)), gap_thresh) for x in ridge_lines: if len(x[1]) > min_length: fractures.append(bins[x[1][0]]) plt.plot(bins[x[1][-10:]],freqs[len(freqs)-x[0][-10:]-1],'b') return fractures def identify_ridge_lines(matr, max_distances, gap_thresh): """ Identify ridges in the 2D matrix. Expect that the width of the wavelet feature increases with increasing row number. Parameters ---------- matr: 2-D ndarray Matrix in which to identify ridge lines. max_distances: 1-D sequence At each row, a ridge line is only connected if the relative max at row[n] is within `max_distances`[n] from the relative max at row[n+1]. gap_thresh: int If a relative maximum is not found within `max_distances`, there will be a gap. A ridge line is discontinued if there are more than `gap_thresh` points without connecting a new relative maximum. Returns ------- ridge_lines: tuple tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none found. Each ridge-line will be sorted by row (increasing), but the order of the ridge lines is not specified References ---------- Bioinformatics (2006) 22 (17): 2059-2065. 
def identify_ridge_lines(matr, max_distances, gap_thresh):
    """
    Identify ridges in the 2D matrix. Expect that the width of
    the wavelet feature increases with increasing row number.

    Parameters
    ----------
    matr: 2-D ndarray
        Matrix in which to identify ridge lines.
    max_distances: 1-D sequence
        At each row, a ridge line is only connected
        if the relative max at row[n] is within
        `max_distances`[n] from the relative max at row[n+1].
    gap_thresh: int
        If a relative maximum is not found within `max_distances`,
        there will be a gap. A ridge line is discontinued if
        there are more than `gap_thresh` points without connecting
        a new relative maximum.

    Returns
    -------
    ridge_lines: tuple
        tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the ii-th
        ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none found.
        Each ridge-line will be sorted by row (increasing), but the order
        of the ridge lines is not specified

    References
    ----------
    Bioinformatics (2006) 22 (17): 2059-2065.
    doi: 10.1093/bioinformatics/btl355
    http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

    Examples
    --------
    >>> data = np.random.rand(5,5)
    >>> ridge_lines = identify_ridge_lines(data, 1, 1)

    Notes:
    ------
    This function is intended to be used in conjunction with `cwt`
    as part of find_peaks_cwt.
    """
    if len(max_distances) < matr.shape[0]:
        raise ValueError('Max_distances must have at least as many rows as matr')

    # boolean mask of relative maxima along each row
    all_max_cols = boolrelextrema(matr, np.greater, axis=1, order=1)
    # highest row for which there are any relative maxima
    has_relmax = np.where(all_max_cols.any(axis=1))[0]
    if len(has_relmax) == 0:
        return []
    start_row = has_relmax[-1]

    # each active ridge line is a 3-element list: [rows, cols, gap_count]
    ridge_lines = [[[start_row], [col], 0]
                   for col in np.where(all_max_cols[start_row])[0]]
    final_lines = []
    all_cols = np.arange(0, matr.shape[1])

    # walk from start_row down to row 0, connecting maxima to existing lines
    for row in np.arange(start_row - 1, -1, -1):
        this_max_cols = all_cols[all_max_cols[row]]

        # every line ages by one; reset to zero below when it connects
        for line in ridge_lines:
            line[2] += 1

        # last column reached by each active line (iteration order mirrors
        # ridge_lines, which may differ from all_max_cols[row] ordering)
        prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])

        # connect each maximum at this row to the closest line within
        # max_distances[row], otherwise start a new line there
        for col in this_max_cols:
            candidate = None
            if len(prev_ridge_cols) > 0:
                diffs = np.abs(col - prev_ridge_cols)
                closest = np.argmin(diffs)
                if diffs[closest] <= max_distances[row]:
                    candidate = ridge_lines[closest]
            if candidate is not None:
                candidate[1].append(col)
                candidate[0].append(row)
                candidate[2] = 0
            else:
                ridge_lines.append([[row], [col], 0])

        # retire lines whose gap count exceeds the threshold; iterating
        # backwards keeps the deletions index-safe
        for ind in range(len(ridge_lines) - 1, -1, -1):
            line = ridge_lines[ind]
            if line[2] > gap_thresh:
                final_lines.append(line)
                del ridge_lines[ind]

    # sort every line (retired and still active) by increasing row
    out_lines = []
    for line in final_lines + ridge_lines:
        sortargs = np.array(np.argsort(line[0]))
        rows = np.zeros_like(sortargs)
        cols = np.zeros_like(sortargs)
        rows[sortargs] = line[0]
        cols[sortargs] = line[1]
        out_lines.append([rows, cols])
    return out_lines


def boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
    """
    Calculate the relative extrema of `data`.

    Relative extrema are calculated by finding locations where
    comparator(data[n],data[n+1:n+order+1]) = True.

    Parameters
    ----------
    data: ndarray
    comparator: function
        function to use to compare two data points.
        Should take 2 numbers as arguments
    axis: int, optional
        axis over which to select from `data`
    order: int, optional
        How many points on each side to require
        a `comparator`(n,n+x) = True.
    mode: string, optional
        How the edges of the vector are treated.
        'wrap' (wrap around) or 'clip' (treat overflow
        as the same as the last (or first) element).
        Default 'clip'. See numpy.take

    Returns
    -------
    extrema: ndarray
        Indices of the extrema, as boolean array
        of same shape as data. True for an extrema,
        False else.

    See also
    --------
    argrelmax,argrelmin

    Examples
    --------
    >>> testdata = np.array([1,2,3,2,1])
    >>> argrelextrema(testdata, np.greater, axis=0)
    array([False, False,  True, False, False], dtype=bool)
    """
    if (int(order) != order) or (order < 1):
        raise ValueError('Order must be an int >= 1')

    datalen = data.shape[axis]
    locs = np.arange(datalen)
    results = np.ones(data.shape, dtype=bool)
    main = data.take(locs, axis=axis, mode=mode)
    # a point is an extremum only if it beats every neighbour within `order`
    # on both sides (edges handled by numpy.take's `mode`)
    for shift in range(1, order + 1):
        plus = data.take(locs + shift, axis=axis, mode=mode)
        minus = data.take(locs - shift, axis=axis, mode=mode)
        results &= comparator(main, plus)
        results &= comparator(main, minus)
    return results
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import timeutils
import webob.exc

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
from cinder import volume


CONF = cfg.CONF

LOG = logging.getLogger(__name__)

authorize = extensions.extension_authorizer('volume', 'services')


class ServiceController(wsgi.Controller):
    """REST controller for the os-services extension (list/update services)."""

    def __init__(self, ext_mgr=None):
        self.ext_mgr = ext_mgr
        super(ServiceController, self).__init__()
        self.volume_api = volume.API()

    def index(self, req):
        """Return a list of all running services.

        Filter by host & service name.
        """
        context = req.environ['cinder.context']
        authorize(context, action='index')
        # Extra per-service fields are returned only when the
        # os-extended-services extension is loaded.
        detailed = self.ext_mgr.is_loaded('os-extended-services')
        now = timeutils.utcnow(with_timezone=True)

        filters = {}

        if 'host' in req.GET:
            filters['host'] = req.GET['host']
        # 'binary' takes precedence over the deprecated 'service' key.
        if 'binary' in req.GET:
            filters['binary'] = req.GET['binary']
        elif 'service' in req.GET:
            filters['binary'] = req.GET['service']
            versionutils.report_deprecated_feature(LOG, _(
                "Query by service parameter is deprecated. "
                "Please use binary parameter instead."))

        services = objects.ServiceList.get_all(context, filters)

        svcs = []
        for svc in services:
            # A service is considered "up" if its heartbeat (most recent of
            # updated_at / modified_at) is within CONF.service_down_time.
            updated_at = svc.updated_at
            delta = now - (svc.updated_at or svc.created_at)
            delta_sec = delta.total_seconds()
            if svc.modified_at:
                delta_mod = now - svc.modified_at
                if abs(delta_sec) >= abs(delta_mod.total_seconds()):
                    updated_at = svc.modified_at
            alive = abs(delta_sec) <= CONF.service_down_time
            art = (alive and "up") or "down"
            active = 'enabled'
            if svc.disabled:
                active = 'disabled'
            if updated_at:
                updated_at = timeutils.normalize_time(updated_at)
            ret_fields = {'binary': svc.binary, 'host': svc.host,
                          'zone': svc.availability_zone,
                          'status': active, 'state': art,
                          'updated_at': updated_at}
            if detailed:
                ret_fields['disabled_reason'] = svc.disabled_reason
                # Replication fields only apply to the volume service.
                if svc.binary == "cinder-volume":
                    ret_fields['replication_status'] = svc.replication_status
                    ret_fields['active_backend_id'] = svc.active_backend_id
                    ret_fields['frozen'] = svc.frozen
            svcs.append(ret_fields)
        return {'services': svcs}

    def _is_valid_as_reason(self, reason):
        """Return True if `reason` is a usable disabled-reason string."""
        if not reason:
            return False
        try:
            utils.check_string_length(reason, 'Disabled reason', min_length=1,
                                      max_length=255, allow_all_spaces=False)
        except exception.InvalidInput:
            return False

        return True

    def _freeze(self, context, host):
        # Delegate to the volume API so the driver layer handles the freeze.
        return self.volume_api.freeze_host(context, host)

    def _thaw(self, context, host):
        return self.volume_api.thaw_host(context, host)

    def _failover(self, context, host, backend_id=None):
        return self.volume_api.failover_host(context, host, backend_id)

    def update(self, req, id, body):
        """Enable/Disable scheduling for a service.

        Includes Freeze/Thaw which sends call down to drivers
        and allows volume.manager for the specified host to
        disable the service rather than accessing the service
        directly in this API layer.
        """
        context = req.environ['cinder.context']
        authorize(context, action='update')

        ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
        ret_val = {}
        # `id` carries the action name; freeze/thaw/failover return early
        # and never touch the Service DB record directly.
        if id == "enable":
            disabled = False
            status = "enabled"
            if ext_loaded:
                ret_val['disabled_reason'] = None
        elif (id == "disable" or
                (id == "disable-log-reason" and ext_loaded)):
            disabled = True
            status = "disabled"
        elif id == "freeze":
            return self._freeze(context, body['host'])
        elif id == "thaw":
            return self._thaw(context, body['host'])
        elif id == "failover_host":
            self._failover(
                context,
                body['host'],
                body.get('backend_id', None)
            )
            return webob.Response(status_int=202)
        else:
            raise webob.exc.HTTPNotFound(explanation=_("Unknown action"))

        try:
            host = body['host']
        except (TypeError, KeyError):
            msg = _("Missing required element 'host' in request body.")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        ret_val['disabled'] = disabled
        if id == "disable-log-reason" and ext_loaded:
            reason = body.get('disabled_reason')
            if not self._is_valid_as_reason(reason):
                msg = _('Disabled reason contains invalid characters '
                        'or is too long')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            ret_val['disabled_reason'] = reason

        # NOTE(uni): deprecating service request key, binary takes precedence
        # Still keeping service key here for API compatibility sake.
        service = body.get('service', '')
        binary = body.get('binary', '')
        binary_key = binary or service
        if not binary_key:
            raise webob.exc.HTTPBadRequest()

        try:
            svc = objects.Service.get_by_args(context, host, binary_key)
            if not svc:
                raise webob.exc.HTTPNotFound(explanation=_('Unknown service'))

            svc.disabled = ret_val['disabled']
            if 'disabled_reason' in ret_val:
                svc.disabled_reason = ret_val['disabled_reason']
            svc.save()
        except exception.ServiceNotFound:
            raise webob.exc.HTTPNotFound(explanation=_("service not found"))

        ret_val.update({'host': host, 'service': service,
                        'binary': binary, 'status': status})
        return ret_val


class Services(extensions.ExtensionDescriptor):
    """Services support."""

    name = "Services"
    alias = "os-services"
    updated = "2012-10-28T00:00:00-00:00"

    def get_resources(self):
        resources = []
        controller = ServiceController(self.ext_mgr)
        resource = extensions.ResourceExtension('os-services', controller)
        resources.append(resource)
        return resources
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
# Copyright 2018 Open GEE Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The search_manager module.

Classes for managing search (gesearch/gepoi) databases.
"""

import logging

from common import postgres_manager
from common import postgres_properties
from serve import constants
from serve import http_io
from serve.push.search.util import search_schema_table_util

# Create logger.
logger = logging.getLogger("ge_search_publisher")


class SearchManager(object):
  """Class for managing search databases."""

  def __init__(self):
    """Inits SearchManager."""
    # Note: added as a good practice since we use SearchManager as a base class.
    super(SearchManager, self).__init__()

    # Init database connections.
    # '/tmp' as host selects local Unix-socket connections to Postgres.
    self._host = '/tmp'
    self._search_database = "gesearch"
    self._poi_database = "gepoi"
    self._db_user = "geuser"
    postgres_prop = postgres_properties.PostgresProperties()
    self._port = postgres_prop.GetPortNumber()

    # Create DB connection to gesearch database.
    self._search_db_connection = postgres_manager.PostgresConnection(
        self._search_database, self._db_user, self._host, self._port, logger)

    # Create DB connection to gepoi database.
    self._poi_db_connection = postgres_manager.PostgresConnection(
        self._poi_database, self._db_user, self._host, self._port, logger)

    # Create search schema table utility.
    self.table_utility = search_schema_table_util.SearchSchemaTableUtil(
        self._poi_db_connection)

  def HandlePingRequest(self, request, response):
    """Handles ping database request.

    Args:
      request: request object.
      response: response object
    Raises:
      psycopg2.Error/Warning.
    """
    cmd = request.GetParameter(constants.CMD)
    assert cmd == "Ping"

    # Fire off a ping query to make sure we have a valid db connection.
    query_string = "SELECT 'ping'"
    results = self._DbQuery(query_string)
    if results and results[0] == "ping":
      http_io.ResponseWriter.AddBodyElement(
          response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
    else:
      http_io.ResponseWriter.AddBodyElement(
          response, constants.HDR_STATUS_MESSAGE,
          "Cannot ping gesearch database.")
      http_io.ResponseWriter.AddBodyElement(
          response, constants.HDR_STATUS_CODE, constants.STATUS_FAILURE)

  def QueryDbId(self, client_host_name, db_name):
    """Queries database ID by name.

    Args:
      client_host_name: Fusion client host name (part of database ID).
      db_name: database name.
    Returns:
      database ID (0 when no matching row is found).
    Raises:
      psycopg2.Error/Warning
    """
    db_id = 0
    query_string = ("SELECT db_id FROM db_table "
                    "WHERE host_name = %s AND db_name = %s")
    result = self._DbQuery(query_string, (client_host_name, db_name))
    if result:
      db_id = int(result[0])

    return db_id

  def QueryListDbs(self):
    """Queries all registered databases.

    Returns:
      list of (host_name, db_name) rows from db_table.
    Raises:
      psycopg2.Error/Warning
    """
    query_string = "SELECT host_name, db_name FROM db_table"
    results = self._DbQuery(query_string)
    return results

  def _QuerySearchDefId(self, search_def_name):
    """Queries search definition ID by name.

    Args:
      search_def_name: search definition name.
    Returns:
      search definition ID or 0 if search definition is not found.
    Raises:
      psycopg2.Error/Warning
    """
    search_def_id = 0
    query_string = ("SELECT search_def_id FROM search_def_table"
                    " WHERE search_def_name = %s")
    result = self._DbQuery(query_string, (search_def_name,))
    if result:
      # search_def_name is expected to be unique in search_def_table.
      assert isinstance(result, list) and len(result) == 1
      search_def_id = result[0]

    return search_def_id

  def GetSearchDefDetails(self, search_def_name):
    """Gets search definition details by name.

    Args:
      search_def_name: search definitions name.
    Raises:
      psycopg2.Error/Warning.
    Returns:
      Search definition content record from search_def_table or None
      if search definition is not found.
    """
    # Get search def info from search_def_table.
    query_string = ("SELECT search_def_content"
                    " FROM search_def_table WHERE search_def_name = %s")
    result = self._DbQuery(query_string, (search_def_name,))
    if not result:
      return None

    assert isinstance(result, list) and len(result) == 1
    return result[0]

  def _DbQuery(self, query_string, parameters=None):
    """Handles DB query request to gesearch database.

    Args:
      query_string: SQL query statement.
      parameters: sequence of parameters to populate into placeholders.
    Returns:
      Results as list of tuples (rows of fields).
    Raises:
      psycopg2.Error/Warning in case of error.
    """
    return self._search_db_connection.Query(query_string, parameters)

  def _DbModify(self, command_string, parameters=None):
    """Handles DB modify request to gesearch database.

    Args:
      command_string: SQL UPDATE/INSERT/DELETE command string.
      parameters: sequence of parameters to populate into placeholders.
    Returns:
      Number of rows that sql command affected.
    Raises:
      psycopg2.Error/Warning in case of error.
    """
    return self._search_db_connection.Modify(command_string, parameters)

  def _QueryPoiId(self, host_name, poi_file_path):
    """Queries POI ID for hostname:poi_file_path.

    Args:
      host_name: host name. it is a part of database identifier.
      poi_file_path: POI file path.
    Returns:
      POI ID for the pair hostname:poi_file_path (0 when not found).
    Raises:
      psycopg2.Error/Warning
    """
    poi_id = 0
    query_string = ("SELECT poi_id FROM poi_table "
                    "WHERE host_name = %s AND poi_file_path = %s")
    results = self._DbQuery(query_string, [host_name, poi_file_path])
    if results:
      poi_id = results[0]

    return poi_id


def main():
  pass


if __name__ == "__main__":
  main()
# -*- coding: utf-8 -*-
"""
===========
octavemagic
===========

Magics for interacting with Octave via oct2py.

.. note::

    The ``oct2py`` module needs to be installed separately and
    can be obtained using ``easy_install`` or ``pip``.

Usage
=====

``%octave``

{OCTAVE_DOC}

``%octave_push``

{OCTAVE_PUSH_DOC}

``%octave_pull``

{OCTAVE_PULL_DOC}

"""

#-----------------------------------------------------------------------------
#  Copyright (C) 2012 The IPython Development Team
#
#  Distributed under the terms of the BSD License.  The full license is in
#  the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------

import tempfile
from glob import glob
from shutil import rmtree

import numpy as np
import oct2py
from xml.dom import minidom

from IPython.core.displaypub import publish_display_data
from IPython.core.magic import (Magics, magics_class, line_magic,
                                line_cell_magic, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.core.magic_arguments import (
    argument, magic_arguments, parse_argstring
)
from IPython.utils.py3compat import unicode_to_str


class OctaveMagicError(oct2py.Oct2PyError):
    pass

# Maps the -f plot format flag to the MIME type used when publishing.
_mimetypes = {'png' : 'image/png',
              'svg' : 'image/svg+xml',
              'jpg' : 'image/jpeg',
              'jpeg': 'image/jpeg'}


@magics_class
class OctaveMagics(Magics):
    """A set of magics useful for interactive work with Octave via oct2py.
    """
    def __init__(self, shell):
        """
        Parameters
        ----------
        shell : IPython shell

        """
        super(OctaveMagics, self).__init__(shell)
        self._oct = oct2py.Oct2Py()
        self._plot_format = 'png'

        # Allow publish_display_data to be overridden for
        # testing purposes.
        self._publish_display_data = publish_display_data

    def _fix_gnuplot_svg_size(self, image, size=None):
        """
        GnuPlot SVGs do not have height/width attributes.  Set
        these to be the same as the viewBox, so that the browser
        scales the image correctly.

        Parameters
        ----------
        image : str
            SVG data.
        size : tuple of int
            Image width, height.

        """
        (svg,) = minidom.parseString(image).getElementsByTagName('svg')
        viewbox = svg.getAttribute('viewBox').split(' ')

        if size is not None:
            width, height = size
        else:
            # Fall back to the viewBox dimensions when no size is given.
            width, height = viewbox[2:]

        svg.setAttribute('width', '%dpx' % width)
        svg.setAttribute('height', '%dpx' % height)
        return svg.toxml()

    @skip_doctest
    @line_magic
    def octave_push(self, line):
        '''
        Line-level magic that pushes a variable to Octave.

        `line` should be made up of whitespace separated variable names in the
        IPython namespace::

            In [7]: import numpy as np

            In [8]: X = np.arange(5)

            In [9]: X.mean()
            Out[9]: 2.0

            In [10]: %octave_push X

            In [11]: %octave mean(X)
            Out[11]: 2.0

        '''
        inputs = line.split(' ')
        for input in inputs:
            input = unicode_to_str(input)
            self._oct.put(input, self.shell.user_ns[input])

    @skip_doctest
    @line_magic
    def octave_pull(self, line):
        '''
        Line-level magic that pulls a variable from Octave.

            In [18]: _ = %octave x = [1 2; 3 4]; y = 'hello'

            In [19]: %octave_pull x y

            In [20]: x
            Out[20]:
            array([[ 1.,  2.],
                   [ 3.,  4.]])

            In [21]: y
            Out[21]: 'hello'

        '''
        outputs = line.split(' ')
        for output in outputs:
            output = unicode_to_str(output)
            self.shell.push({output: self._oct.get(output)})

    @skip_doctest
    @magic_arguments()
    @argument(
        '-i', '--input', action='append',
        help='Names of input variables to be pushed to Octave. Multiple names '
             'can be passed, separated by commas with no whitespace.'
        )
    @argument(
        '-o', '--output', action='append',
        help='Names of variables to be pulled from Octave after executing cell '
             'body. Multiple names can be passed, separated by commas with no '
             'whitespace.'
        )
    @argument(
        '-s', '--size', action='store',
        help='Pixel size of plots, "width,height". Default is "-s 400,250".'
        )
    @argument(
        '-f', '--format', action='store',
        help='Plot format (png, svg or jpg).'
        )
    @needs_local_scope
    @argument(
        'code',
        nargs='*',
        )
    @line_cell_magic
    def octave(self, line, cell=None, local_ns=None):
        '''
        Execute code in Octave, and pull some of the results back into the
        Python namespace.

            In [9]: %octave X = [1 2; 3 4]; mean(X)
            Out[9]: array([[ 2., 3.]])

        As a cell, this will run a block of Octave code, without returning any
        value::

            In [10]: %%octave
               ....: p = [-2, -1, 0, 1, 2]
               ....: polyout(p, 'x')

            -2*x^4 - 1*x^3 + 0*x^2 + 1*x^1 + 2

        In the notebook, plots are published as the output of the cell, e.g.

        %octave plot([1 2 3], [4 5 6])

        will create a line plot.

        Objects can be passed back and forth between Octave and IPython via the
        -i and -o flags in line::

            In [14]: Z = np.array([1, 4, 5, 10])

            In [15]: %octave -i Z mean(Z)
            Out[15]: array([ 5.])


            In [16]: %octave -o W W = Z * mean(Z)
            Out[16]: array([  5.,  20.,  25.,  50.])

            In [17]: W
            Out[17]: array([  5.,  20.,  25.,  50.])

        The size and format of output plots can be specified::

            In [18]: %%octave -s 600,800 -f svg
                ...: plot([1, 2, 3]);

        '''
        args = parse_argstring(self.octave, line)

        # arguments 'code' in line are prepended to the cell lines
        if cell is None:
            code = ''
            return_output = True
        else:
            code = cell
            return_output = False

        code = ' '.join(args.code) + code

        # if there is no local namespace then default to an empty dict
        if local_ns is None:
            local_ns = {}

        if args.input:
            for input in ','.join(args.input).split(','):
                input = unicode_to_str(input)
                # Prefer the local scope, fall back to the user namespace.
                try:
                    val = local_ns[input]
                except KeyError:
                    val = self.shell.user_ns[input]
                self._oct.put(input, val)

        # generate plots in a temporary directory
        # (forward slashes so the path works inside Octave on Windows too)
        plot_dir = tempfile.mkdtemp().replace('\\', '/')
        if args.size is not None:
            size = args.size
        else:
            size = '400,240'

        if args.format is not None:
            plot_format = args.format
        else:
            plot_format = 'png'

        # Octave prologue: collect created figures invisibly so they can be
        # saved and published after the user code runs.
        pre_call = '''
        global __ipy_figures = [];
        page_screen_output(0);

        function fig_create(src, event)
          global __ipy_figures;
          __ipy_figures(size(__ipy_figures) + 1) = src;
          set(src, "visible", "off");
        end

        set(0, 'DefaultFigureCreateFcn', @fig_create);

        close all;
        clear ans;

        # ___<end_pre_call>___ #
        '''

        # Octave epilogue: capture `ans` and print every figure to disk.
        post_call = '''
        # ___<start_post_call>___ #

        # Save output of the last execution
        if exist("ans") == 1
          _ = ans;
        else
          _ = nan;
        end

        for f = __ipy_figures
          outfile = sprintf('%(plot_dir)s/__ipy_oct_fig_%%03d.png', f);
          try
            print(f, outfile, '-d%(plot_format)s', '-tight', '-S%(size)s');
          end
        end

        ''' % locals()

        code = ' '.join((pre_call, code, post_call))
        try:
            text_output = self._oct.run(code, verbose=False)
        except (oct2py.Oct2PyError) as exception:
            msg = exception.message
            # Strip the pre/post boilerplate from the traceback using the
            # sentinel comments embedded above.
            msg = msg.split('# ___<end_pre_call>___ #')[1]
            msg = msg.split('# ___<start_post_call>___ #')[0]
            raise OctaveMagicError('Octave could not complete execution.  '
                                   'Traceback (currently broken in oct2py): %s'
                                   % msg)

        key = 'OctaveMagic.Octave'
        display_data = []

        # Publish text output
        if text_output:
            display_data.append((key, {'text/plain': text_output}))

        # Publish images
        images = [open(imgfile, 'rb').read() for imgfile in \
                  glob("%s/*" % plot_dir)]
        rmtree(plot_dir)

        plot_mime_type = _mimetypes.get(plot_format, 'image/png')
        width, height = [int(s) for s in size.split(',')]
        for image in images:
            if plot_format == 'svg':
                image = self._fix_gnuplot_svg_size(image,
                                                   size=(width, height))
            display_data.append((key, {plot_mime_type: image}))

        if args.output:
            for output in ','.join(args.output).split(','):
                output = unicode_to_str(output)
                self.shell.push({output: self._oct.get(output)})

        for source, data in display_data:
            self._publish_display_data(source, data)

        if return_output:
            ans = self._oct.get('_')

            # Unfortunately, Octave doesn't have a "None" object,
            # so we can't return any NaN outputs
            if np.isscalar(ans) and np.isnan(ans):
                ans = None

            return ans


# Splice the magic docstrings into the module docstring placeholders.
__doc__ = __doc__.format(
    OCTAVE_DOC = ' '*8 + OctaveMagics.octave.__doc__,
    OCTAVE_PUSH_DOC = ' '*8 + OctaveMagics.octave_push.__doc__,
    OCTAVE_PULL_DOC = ' '*8 + OctaveMagics.octave_pull.__doc__
)


def load_ipython_extension(ip):
    """Load the extension in IPython."""
    ip.register_magics(OctaveMagics)
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.  All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Creates a build from the given commands.

A command is either an addition or a subtraction.  An addition is prefixed with
a +; a subtraction is when prefixed with a -.  After the character, there is a
name of a file or a @ sign and the name of a build file.

Build files are the files found in build/types.  These files are simply a
newline separated list of commands to execute.  So if the "+@complete" command
is given, it will open the complete file and run it (which may in turn open
other build files).  Subtracting a build file will reverse all actions applied
by the given file.  So "-@networking" will remove all the networking plugins.

The core library is always included so does not have to be listed.  The default
is to use the name 'compiled'; if no commands are given, it will build the
complete build.

Examples:
  # Equivalent to +@complete
  build.py

  build.py +@complete
  build.py +@complete -@networking
  build.py --name custom +@manifests +@networking +../my_plugin.js
"""

import argparse
import logging
import os
import re
import sys

import shakaBuildHelpers


# Closure flags shared by every compilation this script performs.
common_closure_opts = [
    '--language_in', 'ECMASCRIPT6',
    '--language_out', 'ECMASCRIPT3',

    '--jscomp_error=*',

    '--extra_annotation_name=listens',
    '--extra_annotation_name=exportDoc',
    '--extra_annotation_name=exportInterface',

    '--conformance_configs',
    ('%s/build/conformance.textproto' % shakaBuildHelpers.cygwin_safe_path(
        shakaBuildHelpers.get_source_base())),

    '--generate_exports',
]
common_closure_defines = [
    '-D', 'COMPILED=true',
    '-D', 'goog.STRICT_MODE_COMPATIBLE=true',
    '-D', 'goog.ENABLE_DEBUG_LOADER=false',
]

debug_closure_opts = [
    # Don't use a wrapper script in debug mode so all the internals are visible
    # on the global object.
    '-O', 'SIMPLE',
]
debug_closure_defines = [
    '-D', 'goog.DEBUG=true',
    '-D', 'goog.asserts.ENABLE_ASSERTS=true',
    '-D', 'shaka.log.MAX_LOG_LEVEL=4',  # shaka.log.Level.DEBUG
    '-D', 'shaka.Player.version="%s-debug"' % (
        shakaBuildHelpers.calculate_version()),
]

release_closure_opts = [
    ('--output_wrapper_file=%s/build/wrapper.template.js' %
     shakaBuildHelpers.cygwin_safe_path(shakaBuildHelpers.get_source_base())),
    '-O', 'ADVANCED',
]
release_closure_defines = [
    '-D', 'goog.DEBUG=false',
    '-D', 'goog.asserts.ENABLE_ASSERTS=false',
    '-D', 'shaka.log.MAX_LOG_LEVEL=0',
    '-D', 'shaka.Player.version="%s"' % shakaBuildHelpers.calculate_version(),
]


class Build(object):
  """Defines a build that has been parsed from a build file.

  This has exclude files even though it will not be used at the top-level.
  This allows combining builds.  A file will only exist in at most one set.

  Members:
    include - A set of files to include.
    exclude - A set of files to remove.
  """

  def __init__(self, include=None, exclude=None):
    self.include = include or set()
    self.exclude = exclude or set()

  def _get_build_file_path(self, name, root):
    """Gets the full path to a build file, if it exists.

    Args:
      name: The string name to check.
      root: The full path to the base directory.

    Returns:
      The full path to the build file, or None if not found.
    """
    source_base = shakaBuildHelpers.get_source_base()
    local_path = os.path.join(root, name)
    build_path = os.path.join(source_base, 'build', 'types', name)
    # A name that resolves both next to the referencing build file AND in
    # build/types is ambiguous; refuse rather than guess.
    if (os.path.isfile(local_path) and os.path.isfile(build_path)
        and local_path != build_path):
      logging.error('Build file "%s" is ambiguous', name)
      return None
    elif os.path.isfile(local_path):
      return local_path
    elif os.path.isfile(build_path):
      return build_path
    else:
      logging.error('Build file not found: %s', name)
      return None

  def _combine(self, other):
    # Union both sets, then cancel out files that appear in both so each
    # file lives in at most one of include/exclude.
    include_all = self.include | other.include
    exclude_all = self.exclude | other.exclude
    self.include = include_all - exclude_all
    self.exclude = exclude_all - include_all

  def reverse(self):
    # Swapping the sets undoes every action of this build.
    return Build(self.exclude, self.include)

  def add_core(self):
    """Adds the core library."""
    # Add externs and closure dependencies.
    source_base = shakaBuildHelpers.get_source_base()
    match = re.compile(r'.*\.js$')
    self.include |= set(
        shakaBuildHelpers.get_all_files(
            os.path.join(source_base, 'externs'), match) +
        shakaBuildHelpers.get_all_files(
            os.path.join(source_base, 'third_party', 'closure'), match))

    # Check that there are no files in 'core' that are removed
    core_build = Build()
    core_build.parse_build(['+@core'], os.getcwd())
    core_files = core_build.include
    if self.exclude & core_files:
      logging.error('Cannot exclude files from core')
    self.include |= core_files

  def parse_build(self, lines, root):
    """Parses a Build object from the given lines of commands.

    This will recursively read and parse builds.

    Args:
      lines: An array of strings defining commands.
      root: The full path to the base directory.

    Returns:
      True on success, False otherwise.
    """
    for line in lines:
      # Strip comments
      try:
        line = line[:line.index('#')]
      except ValueError:
        pass

      # Strip whitespace and ignore empty lines.
      line = line.strip()
      if not line:
        continue

      if line[0] == '+':
        is_neg = False
        line = line[1:].strip()
      elif line[0] == '-':
        is_neg = True
        line = line[1:].strip()
      else:
        logging.error('Operation (+/-) required')
        return False

      if line[0] == '@':
        line = line[1:].strip()

        build_path = self._get_build_file_path(line, root)
        if not build_path:
          return False

        lines = open(build_path).readlines()
        sub_root = os.path.dirname(build_path)

        # If this is a build file, then recurse and combine the builds.
        sub_build = Build()
        if not sub_build.parse_build(lines, sub_root):
          return False

        if is_neg:
          self._combine(sub_build.reverse())
        else:
          self._combine(sub_build)
      else:
        # Plain file reference: resolve relative to the build file's root.
        if not os.path.isabs(line):
          line = os.path.abspath(os.path.join(root, line))
        if not os.path.isfile(line):
          logging.error('Unable to find file: %s', line)
          return False

        if is_neg:
          self.include.discard(line)
          self.exclude.add(line)
        else:
          self.include.add(line)
          self.exclude.discard(line)

    return True

  def build_raw(self, closure_opts):
    """Builds the files in |self.include| using the given extra Closure
    options.

    Args:
      closure_opts: An array of options to give to Closure.

    Returns:
      True on success; False on failure.
    """
    jar = os.path.join(shakaBuildHelpers.get_source_base(),
                       'third_party', 'closure', 'compiler.jar')
    jar = shakaBuildHelpers.cygwin_safe_path(jar)
    files = [shakaBuildHelpers.cygwin_safe_path(f) for f in self.include]
    # Sort for a deterministic compiler invocation.
    files.sort()

    cmd_line = ['java', '-jar', jar] + closure_opts + files

    if shakaBuildHelpers.execute_get_code(cmd_line) != 0:
      logging.error('Build failed')
      return False

    return True

  def generate_externs(self, name):
    """Generates externs for the files in |self.include|.

    Args:
      name: The name of the build.

    Returns:
      True on success; False on failure.
    """
    files = [shakaBuildHelpers.cygwin_safe_path(f) for f in self.include]
    extern_generator = shakaBuildHelpers.cygwin_safe_path(os.path.join(
        shakaBuildHelpers.get_source_base(), 'build', 'generateExterns.js'))
    output = shakaBuildHelpers.cygwin_safe_path(os.path.join(
        shakaBuildHelpers.get_source_base(), 'dist',
        'shaka-player.' + name + '.externs.js'))

    cmd_line = ['node', extern_generator, '--output', output] + files

    if shakaBuildHelpers.execute_get_code(cmd_line) != 0:
      logging.error('Externs generation failed')
      return False

    return True

  def build_library(self, name, rebuild, is_debug):
    """Builds Shaka Player using the files in |self.include|.

    Args:
      name: The name of the build.
      rebuild: True to rebuild, False to ignore if no changes are detected.
      is_debug: True to compile for debugging, false for release.

    Returns:
      True on success; False on failure.
    """
    self.add_core()

    # In the build files, we use '/' in the paths, however Windows uses '\'.
    # Although Windows supports both, the source mapping will not work.  So
    # use Linux-style paths for arguments.
    source_base = shakaBuildHelpers.get_source_base().replace('\\', '/')

    if is_debug:
      name += '.debug'

    result_file, result_map = compute_output_files('shaka-player.' + name)

    # Don't build if we don't have to.
    if not rebuild and not self.should_build(result_file):
      return True

    closure_opts = common_closure_opts + common_closure_defines
    if is_debug:
      closure_opts += debug_closure_opts + debug_closure_defines
    else:
      closure_opts += release_closure_opts + release_closure_defines

    closure_opts += [
        '--create_source_map', result_map, '--js_output_file', result_file,
        '--source_map_location_mapping', source_base + '|..'
    ]
    if not self.build_raw(closure_opts):
      return False

    self.add_source_map(result_file, result_map)

    if not self.generate_externs(name):
      return False

    return True

  def add_source_map(self, result_file, result_map):
    # Add a special source-mapping comment so that Chrome and Firefox can map
    # line and character numbers from the compiled library back to the original
    # source locations.
    with open(result_file, 'a') as f:
      f.write('//# sourceMappingURL=%s' % os.path.basename(result_map))

  def should_build(self, result_file):
    # Returns True when |result_file| is missing or older than any input.
    if not os.path.isfile(result_file):
      # Nothing built, so we should definitely build.
      return True

    # Detect changes to the set of files that we intend to build.
    build_time = os.path.getmtime(result_file)

    # Get a list of files modified since the result file was created.
    edited_files = [f for f in self.include
                    if os.path.getmtime(f) > build_time]
    if edited_files:
      # Some input files have changed, so we should build again.
      return True

    logging.warning('No changes detected, not building.  Use --force '
                    'to override.')
    return False


def compute_output_files(base_name):
  # Returns (js_path, map_path) inside dist/ for the given build name.
  source_base = shakaBuildHelpers.get_source_base().replace('\\', '/')
  prefix = shakaBuildHelpers.cygwin_safe_path(
      os.path.join(source_base, 'dist', base_name))
  js_path = prefix + '.js'
  map_path = prefix + '.map'
  return js_path, map_path


def compile_demo(rebuild, is_debug):
  """Compile the demo application.

  Args:
    rebuild: True to rebuild, False to ignore if no changes are detected.
    is_debug: True to compile for debugging, false for release.

  Returns:
    True on success, False on failure.
  """
  logging.info('Compiling the demo app (%s)...',
               'debug' if is_debug else 'release')

  match = re.compile(r'.*\.js$')
  base = shakaBuildHelpers.get_source_base()
  def get(*args):
    return shakaBuildHelpers.get_all_files(os.path.join(base, *args), match)
  files = set(get('demo') + get('externs')) - set(get('demo/cast_receiver'))
  # Make sure we don't compile in load.js, which will be used to bootstrap
  # everything else.  If we build that into the output, we will get an
  # infinite loop of scripts adding themselves.
  files.remove(os.path.join(base, 'demo', 'load.js'))
  # Remove service_worker.js as well.  This executes in a different context.
  files.remove(os.path.join(base, 'demo', 'service_worker.js'))
  # Add in the generated externs, so that the demo compilation knows the
  # definitions of the library APIs.
  extern_name = ('shaka-player.compiled.debug.externs.js'
                 if is_debug else 'shaka-player.compiled.externs.js')
  files.add(os.path.join(base, 'dist', extern_name))

  demo_build = Build(files)

  name = 'demo.compiled' + ('.debug' if is_debug else '')
  result_file, result_map = compute_output_files(name)

  # Don't build if we don't have to.
  if not rebuild and not demo_build.should_build(result_file):
    return True

  source_base = shakaBuildHelpers.get_source_base().replace('\\', '/')

  closure_opts = common_closure_opts + debug_closure_opts
  closure_opts += [
      # Ignore missing goog.require since we assume the whole library is
      # already included.
      '--jscomp_off=missingRequire', '--jscomp_off=strictMissingRequire',
      '--create_source_map', result_map, '--js_output_file', result_file,
      '--source_map_location_mapping', source_base + '|..',
      '-D', 'COMPILED=true',
  ]

  if not demo_build.build_raw(closure_opts):
    return False

  demo_build.add_source_map(result_file, result_map)
  return True


def compile_receiver(rebuild, is_debug):
  """Compile the cast receiver application.

  Args:
    rebuild: True to rebuild, False to ignore if no changes are detected.
    is_debug: True to compile for debugging, false for release.

  Returns:
    True on success, False on failure.
  """
  logging.info('Compiling the receiver app (%s)...',
               'debug' if is_debug else 'release')

  match = re.compile(r'.*\.js$')
  base = shakaBuildHelpers.get_source_base()
  def get(*args):
    return shakaBuildHelpers.get_all_files(os.path.join(base, *args), match)
  files = set(get('demo/common') +
              get('demo/cast_receiver') +
              get('externs'))
  # Add in the generated externs, so that the receiver compilation knows the
  # definitions of the library APIs.
  extern_name = ('shaka-player.compiled.debug.externs.js'
                 if is_debug else 'shaka-player.compiled.externs.js')
  files.add(os.path.join(base, 'dist', extern_name))

  receiver_build = Build(files)

  name = 'receiver.compiled' + ('.debug' if is_debug else '')
  result_file, result_map = compute_output_files(name)

  # Don't build if we don't have to.
  if not rebuild and not receiver_build.should_build(result_file):
    return True

  source_base = shakaBuildHelpers.get_source_base().replace('\\', '/')

  closure_opts = common_closure_opts + debug_closure_opts
  closure_opts += [
      # Ignore missing goog.require since we assume the whole library is
      # already included.
      '--jscomp_off=missingRequire', '--jscomp_off=strictMissingRequire',
      '--create_source_map', result_map, '--js_output_file', result_file,
      '--source_map_location_mapping', source_base + '|..',
      '-D', 'COMPILED=true',
  ]

  if not receiver_build.build_raw(closure_opts):
    return False

  receiver_build.add_source_map(result_file, result_map)
  return True


def main(args):
  parser = argparse.ArgumentParser(
      description=__doc__,
      formatter_class=argparse.RawDescriptionHelpFormatter)
  parser.add_argument(
      '--force',
      '-f',
      help='Force building the library even if no files have changed.',
      action='store_true')
  parser.add_argument(
      '--mode',
      help='Specify which build mode to use.',
      choices=['debug', 'release'],
      default='release')
  parser.add_argument(
      '--debug',
      help='Same as using "--mode debug".',
      action='store_const',
      dest='mode',
      const='debug')
  parser.add_argument(
      '--name',
      help='Set the name of the build.
Uses "compiled" if not given.', type=str, default='compiled') parsed_args, commands = parser.parse_known_args(args) # If no commands are given then use complete by default. if len(commands) == 0: commands.append('+@complete') logging.info('Compiling the library (%s)...', parsed_args.mode) custom_build = Build() if not custom_build.parse_build(commands, os.getcwd()): return 1 # Update node modules if needed. if not shakaBuildHelpers.update_node_modules(): return 1 name = parsed_args.name rebuild = parsed_args.force is_debug = parsed_args.mode == 'debug' if not custom_build.build_library(name, rebuild, is_debug): return 1 if not compile_demo(rebuild, is_debug): return 1 if not compile_receiver(rebuild, is_debug): return 1 return 0 if __name__ == '__main__': shakaBuildHelpers.run_main(main)
from __future__ import print_function, division

import inspect

from sympy.utilities import default_sort_key
from sympy.external import import_module
from sympy.printing.printer import Printer
import sympy
from functools import partial

# Theano is an optional dependency: the mapping table and tensor aliases
# below are only defined when the import succeeds.
theano = import_module('theano')
if theano:
    ts = theano.scalar
    tt = theano.tensor
    from theano import sandbox
    from theano.sandbox import linalg as tlinalg

    # SymPy operation class -> equivalent Theano operation.
    mapping = {
            sympy.Add: tt.add,
            sympy.Mul: tt.mul,
            sympy.Abs: tt.abs_,
            sympy.sign: tt.sgn,
            sympy.ceiling: tt.ceil,
            sympy.floor: tt.floor,
            sympy.log: tt.log,
            sympy.exp: tt.exp,
            sympy.sqrt: tt.sqrt,
            sympy.cos: tt.cos,
            sympy.acos: tt.arccos,
            sympy.sin: tt.sin,
            sympy.asin: tt.arcsin,
            sympy.tan: tt.tan,
            sympy.atan: tt.arctan,
            sympy.atan2: tt.arctan2,
            sympy.cosh: tt.cosh,
            sympy.acosh: tt.arccosh,
            sympy.sinh: tt.sinh,
            sympy.asinh: tt.arcsinh,
            sympy.tanh: tt.tanh,
            sympy.atanh: tt.arctanh,
            sympy.re: tt.real,
            sympy.im: tt.imag,
            sympy.arg: tt.angle,
            sympy.erf: tt.erf,
            sympy.gamma: tt.gamma,
            sympy.loggamma: tt.gammaln,
            sympy.Pow: tt.pow,
            sympy.Eq: tt.eq,
            sympy.StrictGreaterThan: tt.gt,
            sympy.StrictLessThan: tt.lt,
            sympy.LessThan: tt.le,
            sympy.GreaterThan: tt.ge,
            sympy.Max: tt.maximum,  # Sympy accept >2 inputs, Theano only 2
            sympy.Min: tt.minimum,  # Sympy accept >2 inputs, Theano only 2

            # Matrices
            sympy.MatAdd: tt.Elemwise(ts.add),
            sympy.HadamardProduct: tt.Elemwise(ts.mul),
            sympy.Trace: tlinalg.trace,
            sympy.Determinant : tlinalg.det,
            sympy.Inverse: tlinalg.matrix_inverse,
            sympy.Transpose: tt.DimShuffle((False, False), [1, 0]),
    }


class TheanoPrinter(Printer):
    """ Code printer for Theano computations

    Walks a SymPy expression tree and emits the equivalent Theano graph.
    Printed symbols/matrix-symbols are memoized in ``self.cache`` so that
    the same SymPy symbol always maps to the same Theano variable.
    """
    printmethod = "_theano"

    def __init__(self, *args, **kwargs):
        # The cache may be shared between printers (see global_cache below)
        # so repeated calls reuse the same Theano variables.
        self.cache = kwargs.pop('cache', dict())
        super(TheanoPrinter, self).__init__(*args, **kwargs)

    def _print_Symbol(self, s, dtypes={}, broadcastables={}):
        # NOTE(review): mutable default arguments; harmless here because the
        # dicts are only read, never mutated.
        dtype = dtypes.get(s, 'floatX')
        broadcastable = broadcastables.get(s, ())
        # Key on name, dtype, broadcast pattern AND subclass so distinct
        # symbol types never collide in the cache.
        key = (s.name, dtype, broadcastable, type(s))
        if key in self.cache:
            return self.cache[key]
        else:
            value = tt.tensor(name=s.name, dtype=dtype,
                              broadcastable=broadcastable)
            self.cache[key] = value
            return value

    def _print_AppliedUndef(self, s, dtypes={}, broadcastables={}):
        # Undefined functions like f(t) become plain named tensors; the
        # argument tuple is part of the cache key.
        dtype = dtypes.get(s, 'floatX')
        broadcastable = broadcastables.get(s, ())
        name = str(type(s)) + '_' + str(s.args[0])
        key = (name, dtype, broadcastable, type(s), s.args)
        if key in self.cache:
            return self.cache[key]
        else:
            value = tt.tensor(name=name, dtype=dtype,
                              broadcastable=broadcastable)
            self.cache[key] = value
            return value

    def _print_Basic(self, expr, **kwargs):
        # Generic fallback: look up the Theano op in `mapping` and apply it
        # to the recursively printed arguments.
        op = mapping[type(expr)]
        children = [self._print(arg, **kwargs) for arg in expr.args]
        return op(*children)

    def _print_Number(self, n, **kwargs):
        # NOTE(review): eval() of the number's string form; input comes from
        # a SymPy Number so this is not untrusted, but it is fragile.
        return eval(str(n))

    def _print_MatrixSymbol(self, X, dtypes={}, **kwargs):
        dtype = dtypes.get(X, 'floatX')
        # shape = [self._print(d, dtypes) for d in X.shape]
        key = (X.name, dtype, type(X))
        if key in self.cache:
            return self.cache[key]
        else:
            # (False, False): a non-broadcastable rank-2 tensor (a matrix).
            value = tt.Tensor(dtype, (False, False))(X.name)
            self.cache[key] = value
            return value

    def _print_DenseMatrix(self, X, **kwargs):
        try:
            tt.stacklists
        except AttributeError:
            # Old Theano versions lack stacklists.
            raise NotImplementedError(
               "Matrix translation not yet supported in this version of Theano")
        else:
            return tt.stacklists([[self._print(arg, **kwargs) for arg in L]
                                         for L in X.tolist()])

    _print_ImmutableMatrix = _print_DenseMatrix

    def _print_MatMul(self, expr, **kwargs):
        # Fold the factors left-to-right with tt.dot.
        children = [self._print(arg, **kwargs) for arg in expr.args]
        result = children[0]
        for child in children[1:]:
            result = tt.dot(result, child)
        return result

    def _print_MatrixSlice(self, expr, **kwargs):
        parent = self._print(expr.parent, **kwargs)
        rowslice = self._print(slice(*expr.rowslice), **kwargs)
        colslice = self._print(slice(*expr.colslice), **kwargs)
        return parent[rowslice, colslice]

    def _print_BlockMatrix(self, expr, **kwargs):
        # Join each block row horizontally (axis 1), then stack the rows
        # vertically (axis 0).
        nrows, ncols = expr.blocks.shape
        blocks = [[self._print(expr.blocks[r, c], **kwargs)
                        for c in range(ncols)]
                        for r in range(nrows)]
        return tt.join(0, *[tt.join(1, *row) for row in blocks])

    def _print_slice(self, expr, **kwargs):
        # Print SymPy components of a slice; pass plain ints/None through.
        return slice(*[self._print(i, **kwargs)
                        if isinstance(i, sympy.Basic) else i
                        for i in (expr.start, expr.stop, expr.step)])

    def _print_Pi(self, expr, **kwargs):
        return 3.141592653589793

    def _print_Piecewise(self, expr, **kwargs):
        import numpy as np
        e, cond = expr.args[0].args
        if len(expr.args) == 1:
            # Single piece: NaN wherever the condition does not hold.
            return tt.switch(self._print(cond, **kwargs),
                             self._print(e, **kwargs),
                             np.nan)
        # Recurse on the remaining pieces for the "else" branch.
        return tt.switch(self._print(cond, **kwargs),
                         self._print(e, **kwargs),
                         self._print(sympy.Piecewise(*expr.args[1:]),
                                     **kwargs))

    def _print_Rational(self, expr, **kwargs):
        return tt.true_div(self._print(expr.p, **kwargs),
                           self._print(expr.q, **kwargs))

    def _print_Integer(self, expr, **kwargs):
        return expr.p

    def _print_factorial(self, expr, **kwargs):
        # n! == gamma(n + 1)
        return self._print(sympy.gamma(expr.args[0] + 1), **kwargs)

    def _print_Derivative(self, deriv, **kwargs):
        # Differentiate via repeated R-operator applications, one per
        # differentiation variable.
        rv = self._print(deriv.expr, **kwargs)
        for var in deriv.variables:
            var = self._print(var, **kwargs)
            rv = tt.Rop(rv, var, tt.ones_like(var))
        return rv

    def emptyPrinter(self, expr):
        # Anything unrecognized is returned unchanged.
        return expr

    def doprint(self, expr, **kwargs):
        """Returns printer's representation for expr (as a string)"""
        return self._print(expr, **kwargs)

global_cache = {}


def theano_code(expr, cache=global_cache, **kwargs):
    # By default every call shares `global_cache`, so identical symbols in
    # separate calls map to the same Theano variables.
    return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs)


def dim_handling(inputs, dim=None, dims={}, broadcastables={}, keys=(),
                 **kwargs):
    """ Handle various input types for dimensions in tensor_wrap

    See Also:
        tensor_wrap
        theano_function
    """
    # Precedence: explicit `broadcastables` < per-input `dims` < uniform `dim`.
    if dim:
        dims = dict(zip(inputs, [dim]*len(inputs)))
    if dims:
        maxdim = max(dims.values())
        # Pad every input's broadcast pattern with True up to the max rank.
        broadcastables = dict((i, (False,)*dims[i] + (True,)*(maxdim-dims[i]))
                         for i in inputs)
    return broadcastables


def theano_function(inputs, outputs, dtypes={}, cache=None, **kwargs):
    """ Create Theano function from SymPy expressions """
    # NOTE(review): `cache == None` should be `cache is None` (PEP 8 E711);
    # behaviorally equivalent for the None default.
    cache = {} if cache == None else cache
    broadcastables = dim_handling(inputs, **kwargs)

    # Remove keyword arguments corresponding to dim_handling
    # NOTE(review): inspect.getargspec is removed in Python 3.11+;
    # getfullargspec/signature is the modern replacement.
    dim_names = inspect.getargspec(dim_handling)[0]
    theano_kwargs = dict((k, v) for k, v in kwargs.items()
                         if k not in dim_names)

    code = partial(theano_code, cache=cache, dtypes=dtypes,
                   broadcastables=broadcastables)
    tinputs = list(map(code, inputs))
    toutputs = list(map(code, outputs))
    # A single output is unwrapped from its list, matching theano.function's
    # convention of returning a scalar result for a scalar output.
    toutputs = toutputs[0] if len(toutputs) == 1 else toutputs
    return theano.function(tinputs, toutputs, **theano_kwargs)
# -*- coding: utf-8 -*-
# NOTE(review): this module is Python 2 only — it uses octal literals like
# `01` and the removed `flask.ext` import namespace.

import json
import datetime

from flask import current_app
from flask.ext.testing import (TestCase as Base, Twill)

from app import create_app
from app.user import User, UserDetail, ADMIN, USER, ACTIVE
from config import TestConfig
from app.extensions import db, mail


class TestCase(Base):
    """Base TestClass for your application."""

    # Default WSGI environ for every request: a browser-like UA, a local
    # client address, and the Origin the app expects.
    ENVIRON_BASE = {
        'HTTP_USER_AGENT': 'Chrome',
        'REMOTE_ADDR': '127.0.0.1',
        'HTTP_ORIGIN': 'http://localhost:9000'
    }

    def create_app(self):
        """Create and return a testing flask app."""
        app = create_app(TestConfig)
        self.twill = Twill(app, port=3000)
        return app

    def init_data(self):
        """Seed the test database with one regular user and one admin."""
        demo = User(
            username=u'demo',
            email='demo@example.com',
            password='default',
            role_id=USER,
            status_id=ACTIVE,
            user_detail=UserDetail(
                first_name=u'Demo',
                last_name=u'Dude',
                gender=u'female',
                dob=datetime.date(1985, 01, 17),
                phone=1234567890,
                bio=u'Demo dude is pretty sweet.',
                url=u'http://www.example.com/demo',
            ),
        )
        admin = User(
            username=u'admin',
            email='admin@example.com',
            password='default',
            role_id=ADMIN,
            status_id=ACTIVE,
            user_detail=UserDetail(
                first_name=u'Admin',
                last_name=u'Dude',
                gender=u'male',
                dob=datetime.date(1985, 01, 17),
                phone=1234567890,
                bio=u'Admin dude is the administrator.',
                url=u'http://www.example.com/admin',
            ),
        )
        db.session.add(demo)
        db.session.add(admin)
        db.session.commit()

        # Committed rows must have been assigned primary keys.
        assert demo.id is not None
        assert admin.id is not None
        #self.user = user TODO

    def setUp(self):
        """Reset all tables before testing."""
        db.create_all()
        self.init_data()

    def tearDown(self):
        """Clean db session and drop all tables."""
        db.session.remove()
        db.drop_all()

    def login(self, email, password, remember=False):
        """Helper function to login"""
        rv = self.client.post('/session/', data={
            'email': email,
            'password': password,
            'remember': remember
        }, follow_redirects=True, environ_base=self.ENVIRON_BASE
        )
        self.assert_200(rv)
        assert 'success' in rv.data
        return rv

    def logout(self):
        """Helper function to logout"""
        rv = self.client.delete('/session/', follow_redirects=True,
                                environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        assert 'success' in rv.data
        return rv

    def _test_get_request(self, endpoint, template=None):
        # Simple GET smoke test; `template` is currently unused.
        rv = self.client.get(endpoint)
        self.assert_200(rv)
        return rv


class TestUser(TestCase):
    """Tests for user CRUD, registration, activation and passwords."""

    def test_delete_activate(self):
        """Deactivating an account and reactivating via the activation key."""
        # User logs in and deactivates account
        self.login(email='demo@example.com', password='default')
        active_user = User.query.filter_by(email='demo@example.com').first()
        assert active_user.is_active() is True
        rv = self.client.delete('/users/%d/' % 1,
                                environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        deactivated_user = \
            User.query.filter_by(email='demo@example.com').first()
        assert deactivated_user.is_active() is False
        self.logout()

        # User logs in, initiating an activation
        self.login(email='demo@example.com', password='default')
        deactivated_user = \
            User.query.filter_by(email='demo@example.com').first()

        # User enters new password
        data = {'status': 'active'}
        rv = self.client.put(
            '/users/activate/%s/%s/' % (deactivated_user.email,
                                        deactivated_user.activation_key),
            data=json.dumps(data),
            content_type='application/json',
            environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        assert 'success' in rv.data
        active_user = User.query.filter_by(email='demo@example.com').first()
        assert active_user.is_active() is True
        #self.assert_405(rv)

    def test_get(self):
        """GET of a single user and of the user list succeed when logged in."""
        self.login(email='demo@example.com', password='default')
        # Get a user.
        rv = self.client.get('/users/%d/' % 1, environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        # Get list of users.
        rv = self.client.get('/users/', environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        assert 'success' in rv.data
        self.logout()

    def test_register(self):
        """POST /users/ creates a new account."""
        data = {
            'username': 'member',
            'email': 'member@example.com',
            'password': 'default',
            'password_again': 'default'
        }
        rv = self.client.post('/users/', data=json.dumps(data),
                              content_type='application/json',
                              environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        assert 'success' in rv.data
        # Registration must have persisted the user.
        new_user = User.query.filter_by(email=data['email']).first()
        assert new_user is not None

    def test_update_profile(self):
        """PUT /users/<id>/ updates the profile detail fields."""
        self.login(email='demo@example.com', password='default')
        data = {
            'email': 'demo@example.com',
        }
        user = User.query.filter_by(email=data.get('email')).first()
        assert user is not None
        # Full profile payload; only first/last name actually change.
        data = {
            'first_name': 'Bilbo',
            'last_name': 'Baggins',
            'username': user.username,
            'email': user.email,
            'gender': user.user_detail.gender,
            'dob': user.get_dob(),
            'phone': user.user_detail.phone,
            'bio': user.user_detail.bio,
            'url': user.user_detail.url
        }
        rv = self.client.put('/users/%d/' % user.id, data=json.dumps(data),
                             content_type='application/json',
                             environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        assert 'success' in rv.data
        user = User.query.filter_by(email=data.get('email')).first()
        assert user.user_detail.first_name == 'Bilbo'
        assert user.user_detail.last_name == 'Baggins'
        self.logout()

    def test_password_reset(self):
        """Full password-reset flow: request email, then set new password."""
        # User enters email for account that DNE
        data = {'email': 'missing@example.com'}
        response = self.client.post('/users/password/reset/',
                                    data=json.dumps(data),
                                    content_type='application/json',
                                    environ_base=self.ENVIRON_BASE)
        assert 'Sorry, no user found for that email address.' in response.data
        self.assert_200(response)

        # User enters email and clicks 'Send instructions'
        data = {'email': 'demo@example.com'}
        user = User.query.filter_by(email=data.get('email')).first()
        assert user is not None
        assert user.activation_key is None

        with mail.record_messages() as outbox:
            # Application sends password reset email
            response = self.client.post('/users/password/reset/',
                                        data=json.dumps(data),
                                        content_type='application/json',
                                        environ_base=self.ENVIRON_BASE)
            assert len(outbox) == 1
            assert outbox[0].subject == "Recover your password"

        # The reset request must have stamped an activation key on the user.
        user = User.query.filter_by(email=data.get('email')).first()
        assert user.activation_key is not None

        # User enters new password
        data = {
            'password': 'new password',
            'password_again': 'new password',
        }
        user = User.query.filter_by(activation_key=user.activation_key) \
                         .filter_by(email=user.email).first()
        assert user is not None
        rv = self.client.put(
            '/users/password/%s/%s/' % (user.email, user.activation_key),
            data=json.dumps(data),
            content_type='application/json',
            environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        assert 'success' in rv.data

        # Key is consumed and the new password is in effect.
        user = User.query.filter_by(email='demo@example.com').first()
        assert user is not None
        assert user.activation_key is None
        assert user.check_password(data.get('password')) is True

    def test_password_change(self):
        """A logged-in user can change their own password via PUT."""
        # User logs in
        self.login(email='demo@example.com', password='default')

        # User enters new password
        data = {
            'password': 'new password',
            'password_again': 'new password',
        }
        user = User.query.filter_by(email='demo@example.com').first()
        assert user is not None
        rv = self.client.put('/users/%d/' % user.id, data=json.dumps(data),
                             content_type='application/json',
                             environ_base=self.ENVIRON_BASE)
        self.assert_200(rv)
        assert 'success' in rv.data
        user = User.query.filter_by(email='demo@example.com').first()
        assert user is not None
        assert user.check_password(data.get('password')) is True


class TestMeta(TestCase):
    """Tests for non-user endpoints (contact form)."""

    def test_contact(self):
        """POST /mail/ sends one admin email with the expected subject."""
        # User fills in form and clicks 'Send Message'
        data = {
            'full_name': 'Troubled User',
            'email': 'troubled.user@example.com',
            'subject': 'Help me!',
            'message': 'I have the blue screen of death! Call the doctor!',
        }
        with mail.record_messages() as outbox:
            # Application sends email to admin
            response = self.client.post('/mail/', data=json.dumps(data),
                                        content_type='application/json',
                                        environ_base=self.ENVIRON_BASE)
            assert len(outbox) == 1
            subject = '[%s] Message from %s: %s' % (
                current_app.config['APP_NAME'],
                data.get('full_name'), data.get('subject'))
            assert outbox[0].subject == subject
        self.assert_200(response)
        # TODO assert "Thanks for your message. We'll get back to you shortly." in response.data


class TestErrors(TestCase):
    """Tests that error handlers return the expected HTTP status codes."""

    def test_401(self):
        # Unauthenticated access to a protected resource.
        response = self.client.get('/users/1/',
                                   environ_base=self.ENVIRON_BASE)
        self.assert_401(response)

    def test_403(self):
        # A request from a non-whitelisted Origin is forbidden.
        response = self.client.get('/session/', environ_base={
            'HTTP_ORIGIN': 'http://malicious.com'
        })
        self.assert_403(response)

    def test_404(self):
        response = self.client.get('/i-am-not-found/',
                                   environ_base=self.ENVIRON_BASE)
        self.assert_404(response)

    def test_405(self):
        # POST to an endpoint that only supports GET.
        response = self.client.post('/', data={},
                                    content_type='application/json',
                                    environ_base=self.ENVIRON_BASE)
        self.assert_405(response)

    def test_400(self):
        # Missing HTTP_ORIGIN (only the misspelled HTTP_ORIGINS is set)
        # should be rejected as a bad request.
        response = self.client.get('/session/', environ_base={
            'HTTP_ORIGINS': 'http://localhost:6666'
        })
        self.assert_400(response)

    def test_static_text_file_request(self):
        response = self.client.get('/robots.txt')
        self.assert_200(response)